| code (string, length 81-54k) | code_codestyle (int64, 0-721) | style_context (string, length 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
... | 15 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
... | 15 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_devic... | 15 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A... | 15 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : ... | 15 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i]... | 15 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in... | 15 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ... | 15 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def A__ ( ... | 15 | def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for ch in input_str:
_lowerCamelCase : Optional[Any] = ord(__A )
_lowerCamelCase : List[str] = pow(2 , __A )
# If we already turned o... | 15 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (UnCLIPScheduler,)
def _SCREAMING_SNAKE_CASE ( self :... | 15 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@req... | 15 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase : Union[str, Any] =False
class __snake_case ( unitt... | 15 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
... | 15 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCAmelCase : Optional[int] =l... | 15 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ... | 15 | 1 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def... | 15 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A__ ( ... | 15 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import... | 15 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( __A , __A , __A , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Dono... | 15 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
... | 15 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
... | 15 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowerCAmelCase : str =0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_... | 15 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import A... | 15 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversa... | 15 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
... | 15 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : List[Any] =get_tests_dir("fixtures/test_sentencepiece_with_... | 15 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
... | 15 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import j... | 15 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ... | 15 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
... | 15 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (... | 15 | 1 |
from ...configuration_utils import PretrainedConfig
lowerCAmelCase : str ={
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapa... | 15 | import math
def A__ ( __A ):
'''simple docstring'''
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not numbe... | 15 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET... | 15 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image... | 15 | 1 |
from PIL import Image
def A__ ( __A , __A ):
'''simple docstring'''
def brightness(__A ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ... | 15 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformer... | 15 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_forma... | 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
... | 15 | 0 |
from datetime import datetime
import requests
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
_lowerCamelCase : Any = requests.... | 701 | def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
if n == 0:
return 0
_lowerCamelCase : Tuple = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max(
__A ,... | 15 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = 9
_lowerCamelCase : Union[str, Any] = [
[0, 1, 4],
[... | 702 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ke... | 15 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from acc... | 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
... | 15 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def A__ ( __A ):
'''simple docstri... | 704 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A... | 15 | 0 |
from __future__ import annotations
def A__ ( __A , __A = None , __A = None , __A = False , ):
'''simple docstring'''
_lowerCamelCase : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )]
# If the argument is None or the user ... | 705 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i]... | 15 | 0 |
lowerCAmelCase : Dict ={"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCAmelCase : Any =["a", "b", "c", "d", "e"]
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = star... | 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ... | 15 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Union[str, Any] =logging.get_logger(__... | 707 | def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for ch in input_str:
_lowerCamelCase : Optional[Any] = ord(__A )
_lowerCamelCase : List[str] = pow(2 , __A )
# If we already turned o... | 15 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase : List[Any] ={
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"... | 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@req... | 15 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
... | 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
... | 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise Opt... | 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ... | 15 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipeline... | 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A__ ( ... | 15 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeli... | 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( __A , __A , __A , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Dono... | 15 | 0 |
from collections.abc import Sequence
def A__ ( __A , __A = False ):
'''simple docstring'''
if not arr:
return 0
_lowerCamelCase : Tuple = 0 if allow_empty_subarrays else float("""-inf""" )
_lowerCamelCas... | 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
... | 15 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=... | 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import A... | 15 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_det... | 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
... | 15 | 0 |
from __future__ import annotations
from fractions import Fraction
def A__ ( __A , __A ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def A__ ( __A ):
... | 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
... | 15 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def A__ ( __A ):
... | 717 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ... | 15 | 0 |
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : int = [1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = 0, 0, 0
_lowerCamelCase : List[str] = ugly_nums[ia] * 2
_lowerCamelCas... | 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (... | 15 | 0 |
from __future__ import annotations
lowerCAmelCase : Tuple ={
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""... | 719 | import math
def A__ ( __A ):
'''simple docstring'''
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not numbe... | 15 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffu... | 720 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image... | 15 | 0 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_av... | 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformer... | 15 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Tuple =logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase : Optional[Any] ={
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-unc... | 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
... | 15 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
Efficien... | 701 | def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
if n == 0:
return 0
_lowerCamelCase : Tuple = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max(
__A ,... | 15 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : str ={"""vocab_file""": """v... | 702 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ke... | 15 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_mult... | 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
... | 15 | 0 |
def A__ ( __A ):
'''simple docstring'''
def merge(__A , __A ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return l... | 704 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A... | 15 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from t... | 705 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i]... | 15 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGEN... | 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ... | 15 | 0 |
def A__ ( __A , __A ):
if not (isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
_lowerCamelCase : Optional[int] = len(UpperCAmel... | 707 | def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for ch in input_str:
_lowerCamelCase : Optional[Any] = ord(__A )
_lowerCamelCase : List[str] = pow(2 , __A )
# If we already turned o... | 15 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class __snake_case ( a__ ):
'''simple docstring'''
... | 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@req... | 15 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A__ ( __A , __A , __A ):
'''simple docstring'''
if g... | 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
... | 15 | 0 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, ... | 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ... | 15 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ArgumentParser(
description=(
... | 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A__ ( ... | 15 | 0 |
import random
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = num - 1
_lowerCamelCase : Optional[int] = 0
while s % 2 == 0:
_lowerCamelCase : Any = s // 2
t += 1
... | 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( __A , __A , __A , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Dono... | 15 | 0 |
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_lowerCamelCase : Optional[Any] = """"""
_lowerCamelCase : Optional[Any] = ... | 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
... | 15 | 0 |
from __future__ import annotations
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : int = []
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Tuple... | 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import A... | 15 | 0 |
from __future__ import annotations
from typing import Any
class __snake_case :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : int) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelC... | 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
... | 15 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase : ... | 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
... | 15 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils... | 717 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ... | 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : List[Any] ={"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "... | 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (... | 15 | 0 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def A__ ( __A ):
... | 719 | import math
def A__ ( __A ):
'''simple docstring'''
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not numbe... | 15 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCAmelCase : Optional[int] =logging.get_logger(__name__) # pylint: dis... | 720 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image... | 15 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : int ={
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-l... | 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformer... | 15 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase : List[Any] =[int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = os.path.dirname(os.path.realpath(... | 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
... | 15 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_... | 701 | def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
if n == 0:
return 0
_lowerCamelCase : Tuple = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max(
__A ,... | 15 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A__ ( __A ):
'''simple docstring'''
if (
(cp >= 0X4e00 and cp <= 0X9... | 702 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ke... | 15 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase ... | 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
... | 15 | 0 |
from __future__ import annotations
lowerCAmelCase : List[str] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class __snake_case :
'''simple docstring'''
... | 704 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A... | 15 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : st... | 705 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i]... | 15 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig... | 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ... | 15 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase : Any =""
lowerCAmelCase : Any =""
lowerCAmelCase : Optional[int] =""
lowerCAmelCase : List[str] =1 # (0 is vertical, 1 is horizontal)
def ... | 707 | def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for ch in input_str:
_lowerCamelCase : Optional[Any] = ord(__A )
_lowerCamelCase : List[str] = pow(2 , __A )
# If we already turned o... | 15 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_a... | 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@req... | 15 | 0 |
from datetime import datetime as dt
import os
from github import Github
lowerCAmelCase : int =[
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def A__ ( ):
'''simpl... | 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
... | 15 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppT... | 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ... | 15 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : str ={
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.js... | 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A__ ( ... | 15 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_ima... | 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( __A , __A , __A , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Dono... | 15 | 0 |
class __snake_case :
'''simple docstring'''
def __init__( self : str) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Tuple = """"""
_lowerCamelCase : int = """"""
_... | 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
... | 15 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : str ={
"microsoft/focalnet-tiny... | 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import A... | 15 | 0 |
from __future__ import annotations
import requests
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = F"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(_UpperCAmelCase ).json()
... | 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
... | 15 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
... | 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
... | 15 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
imp... | 717 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ... | 15 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_avai... | 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (... | 15 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import C... | 719 | import math
def A__ ( __A ):
'''simple docstring'''
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not numbe... | 15 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
... | 720 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image... | 15 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowerCAmelCase : str =get... | 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformer... | 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Optional[int] ={"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAECo... | 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
... | 15 | 0 |
import math
import unittest
def A__ ( __A ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
... | 701 | def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
if n == 0:
return 0
_lowerCamelCase : Tuple = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max(
__A ,... | 15 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase : Optional[int] ={
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenizati... | 702 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ke... | 15 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataColl... | 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
... | 15 | 0 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=__lowercase ):
'''simple docstring'''
_snake_case = ['keras_nlp']
def __init__( self : Any , *_UpperCamelCase : Dict , **_UpperCamelCase ... | 704 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A... | 15 | 0 |
from string import ascii_uppercase
lowerCAmelCase : Optional[Any] ={str(ord(c) - 55): c for c in ascii_uppercase}
def A__ ( __A , __A ):
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""int() can'... | 705 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i]... | 15 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available... | 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ... | 15 | 0 |
from __future__ import annotations
def A__ ( __A ):
return len(set(_snake_case ) ) == len(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 | def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for ch in input_str:
_lowerCamelCase : Optional[Any] = ord(__A )
_lowerCamelCase : List[str] = pow(2 , __A )
# If we already turned o... | 15 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowerCAmelCase : Dict ={
# 1536-bit
5: ... | 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@req... | 15 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_... | 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
... | 15 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
imp... | 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ... | 15 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Tuple =logging.get_logger(__name__... | 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A__ ( ... | 15 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase : ... | 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( __A , __A , __A , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Dono... | 15 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
... | 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
... | 15 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Any = int(lowerCamelCase__ )
assert noofclusters < len(lowerCamelCase__ )
# Find out the dimensio... | 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import A... | 15 | 0 |