code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
... | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa... | 11 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTo... | 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
... | 11 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTest... | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : O... | 11 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgume... | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokeniz... | 11 | 0 |
'''simple docstring'''
from math import factorial
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : int,__A : List[Any] ):
_lowerCamelCase : Dict = real
if isinstance(__A,__A ):
_lowerCamelCas... | 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) *... | 11 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutp... | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
fro... | 11 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import requ... | 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPIN... | 11 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_n... | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tok... | 11 | 0 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serializatio... | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_av... | 11 | 0 |
'''simple docstring'''
import requests
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Tuple = {"Content-Type": "application/json"}
_lowerCamelCase : Optional[Any] = requests.... | 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ ... | 11 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co... | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
... | 11 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
... | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.nu... | 11 | 0 |
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for i in range(len(_lowerCAmelCase ) - 1 , 0 , -1 ):
_lowerCamelCase : Tuple = False
for j in range(_lowerCAmelCase , 0 , -1 ):
if unsor... | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_t... | 11 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase__ ( nn.Module ):
def __init__( self : Tuple,__A : int = 1_6,__A : int = 8_8,__A : ... | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
... | 11 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneT... | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowe... | 11 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_avai... | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import H... | 11 | 0 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCAmelCase_ : str = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': o... | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ... | 11 | 0 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : int ):
_lowerCamelCase : dict[str, TrieNode] = {} # Mapping from char to TrieNode
_lowerCamelCase : Optional[int] = False
def lowerCamelCase_ ( self ... | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available()... | 11 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
UpperCAmelCase_ : Tuple = {
# 1536-bit
5: {
... | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = ... | 11 | 0 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase ... | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : ... | 11 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : Any,*__A ... | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase ... | 11 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSample... | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
... | 11 | 0 |
import os
import string
import sys
UpperCAmelCase_ : List[Any] = 1 << 8
UpperCAmelCase_ : int = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KE... | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa... | 11 | 0 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : int,__A : list[int] ):
_lowerCamelCase : Any = len(__A )
_lowerCamelCase : List[Any] = [0] * len_array
if len_array > 0:
_lowe... | 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
... | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : List[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'token... | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : O... | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return "\n".join(
F'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multipli... | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokeniz... | 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
U... | 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) *... | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase_ : Any = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torc... | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
fro... | 11 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = (DDPMScheduler,)
def lowerCamelCase_ ( self : str,**__A : List[str] ):
... | 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPIN... | 11 | 0 |
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = input("Enter message: " )
_lowerCamelCase : List[str] = input("Enter key ... | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tok... | 11 | 0 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit... | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_av... | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str = " " ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = 0
for index, char in enumerate(_lowerCAmelCase ... | 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ ... | 11 | 0 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = {
'tiny.en': 'https://openaipublic.azureed... | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
... | 11 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
fro... | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.nu... | 11 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""si... | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_t... | 11 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_t... | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
... | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( ... | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowe... | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : str = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch... | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import H... | 11 | 0 |
'''simple docstring'''
from math import loga
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
... | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ... | 11 | 0 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
UpperCAmelCase_ : Union[str, Any] = {
'fac... | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available()... | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Any = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Dei... | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = ... | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
if len(_lowerCAmelCase ) == 0:
return []
_lowerCamelCase : Union[str, Any] = min(_lowerCAmelCase ), max(_... | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : ... | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not nums:
return 0
_lowerCamelCase : List[Any] = nums[0]
_lowerCamelCase : Tuple = 0
... | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase ... | 11 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase_ : List[Any] = [
# tf -> hf
('/', '.'),
('layer_', 'layers.... | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
... | 11 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_tr... | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa... | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
... | 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
... | 11 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A_ ( _lowerCAmelCase : str = "laptop" ):
"""simple docstring"""
_lowerCamelCase : Dict = F'https://www.amazon.in/lapt... | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : O... | 11 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCAmelCase__ ( datasets.BeamBasedBuilder ):
def lowerCamelCase_ ( ... | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokeniz... | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
while b:
_lowerCamelCase : List[str] = b, a % b
return a
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase ... | 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) *... | 11 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf... | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
fro... | 11 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A_ ( _lowerCAmelCase : int = 8 ):
"""simple docstring"""
_lowerCamelCase : str = ascii_letters + ... | 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPIN... | 11 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase_ : List[Any] = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns... | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tok... | 11 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase_ : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies ... | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_av... | 11 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
fro... | 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ ... | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCas... | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
... | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ) -> List[str]:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perf... | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.nu... | 11 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_u... | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_t... | 11 | 0 |
'''simple docstring'''
import random
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : dict = {i: [] for i in range(_lowerCAmelCase )}
# if p... | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
... | 11 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase_ : Optional[Any] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thiri... | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowe... | 11 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_att... | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import H... | 11 | 0 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
fro... | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ... | 11 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.u... | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available()... | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : bool = False ):
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly che... | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = ... | 11 | 0 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all e... | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : ... | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import j... | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase ... | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase__ :
def __init__( self : Tuple,__A : int ):
_lowerCamelCase : Union[str, Any] = num_of_nodes
_lowerCamelCase : list[list[int]] ... | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
... | 11 | 0 |
def A_ ( _lowerCAmelCase : int = 600851475143 ):
"""simple docstring"""
try:
_lowerCamelCase : int = int(_lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
... | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa... | 11 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from s... | 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
... | 11 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : int... | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : O... | 11 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
... | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokeniz... | 11 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase__ ( A ):
@require_torch
def lowerCamelCase_ ( self : int )... | 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) *... | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
for i in range(0 , _lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
... | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
fro... | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa... | 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPIN... | 11 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ ... | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tok... | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_availab... | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_av... | 11 | 0 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_... | 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ ... | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return [ord(_lowerCAmelCase ) - 96 for elem in plain]
def A_ ( _lowerCAmelCase : list[int] ):
"""simple docstrin... | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
... | 11 | 0 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.... | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.nu... | 11 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = DistilBertTokenizer
lowe... | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_t... | 11 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowercase ( __lowerC... | 12 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - ge... | 12 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_util... | 12 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = (DDPMScheduler,)
def _a ( self , **_a ) -> ... | 12 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_proce... | 12 |
"""simple docstring"""
import os
a :List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def _lowercase ( __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Dict = 0
while in... | 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
SCREAMING_SNAKE_CASE__ : Dict = 0
... | 12 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import T... | 12 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils im... | 12 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a :List[Any] = logging.get_logger(__name__)
a :Optional[int] = {
"microsoft/focalnet-tiny":... | 12 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditional... | 12 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.nu... | 12 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_t... | 12 |
"""simple docstring"""
a :List[str] = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def _lowercase ( __lowerCAmelCase ) -> ... | 12 | 1 |
"""simple docstring"""
# Imports
import numpy as np
class __a :
'''simple docstring'''
def __init__( self , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Dict:
"""simple docstring"""
self.set_matricies(red=_a , green=_a , ... | 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a :Any = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_A... | 12 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipeline... | 12 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from ... | 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
if a < 0:
raise ValueError("""Input value must be a positive integer""" )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Input value must be a 'in... | 12 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a :Optional[Any] = logging.get_logger(__name__)
a :Union[str, Any] = {
"t5-small": "https://huggingface.co/t5-small/r... | 12 | 1 |
"""simple docstring"""
import unittest
import numpy as np
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : Any = np.shape(__lowerCAmelCase )
SCREAMIN... | 12 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
a :Optional[Any] = [8, 5, 9, 7]
a :List[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
a :int = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5... | 12 | 1 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = (CMStochasticIterativeScheduler,)
_SCRE... | 12 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .token... | 12 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path ... | 12 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(__lowerCAmelCase ) + 1
SCREAMING_SNAKE_CASE__ : int = len(__lowerCAmelCase ) + 1
# dp is a 2d mat... | 12 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transf... | 12 |
"""simple docstring"""
from math import sqrt
def _lowercase ( __lowerCAmelCase ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all mu... | 12 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configurati... | 12 |
"""simple docstring"""
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Optional[Any] = ... | 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> str:
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was... | 12 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, loggin... | 12 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
a :Optional[int] = list[tuple[int, int]]
a :Any = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0... | 12 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
... | 12 | 1 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCS... | 12 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __a (UpperCamelCase_):
'''simple docstring'''
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
... | 12 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
... | 12 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ... | 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = len(__lowerCAmelCase )
for i in range(length - 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = i
for k in r... | 12 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
impo... | 12 | 1 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a :str = get_logger(__name__)
class __a (enum.Enum):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] ... | 12 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : int = 1
while repunit:
SCREAMING_SNA... | 12 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.