code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class lowerCamelCase ( lowercase__ ): '''simple docstring''' lowerCAmelCase_ : str = 'EncodecFeatureEx...
707
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import ...
23
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCamelCase ( l...
708
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_availa...
23
0
def snake_case__ ( __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 22 ) -> int: UpperCAmelCase_ = range(1 , __SCREAMING_SNAKE_CASE ) UpperCAmelCase_ = range(1 , __SCREAMING_SNAKE_CASE ) return sum( 1 for power in powers for base in bases if len(...
709
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = [1] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0 UpperCAmelCase_ = ugly_nums[ia] * 2 UpperCAmelCase_ = ugly_nums[ia] * 3 UpperCAmelCase_ = ugly_nums[i...
23
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "xlm-roberta-base": "https://h...
710
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test...
23
0
from statistics import mean, stdev def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 3 ) -> list: UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ = max(__SCREAMING_SNAKE_CASE ) # normalize data return [round((x - x_min) / (x_max - x_mi...
711
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDepe...
23
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE ...
712
import math def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]: UpperCAmelCase_ = [] UpperCAmelCase_ = 2 UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment UpperCAmelCase_ = [True] * (end + 1) UpperCAmelCase_ = ...
23
0
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_availabl...
713
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor impor...
23
0
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE...
714
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): ...
23
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...uti...
715
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "xlm-roberta-base": "https://h...
23
0
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, ren...
716
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE ) if decimal in (0, 1): # Exit cases for the recursion return str(__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 ) re...
23
0
from math import factorial def snake_case__ ( __SCREAMING_SNAKE_CASE = 20 ) -> int: UpperCAmelCase_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCAmelCase_ = n // 2 return int(factorial(__SCREAMING_SNAKE_CASE ) / (factorial(...
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @requi...
23
0
from __future__ import annotations def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> dict[str, float]: if (voltage, current, resistance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance < 0: ...
718
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: UpperCAmelCase_ = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAM...
23
0
'''simple docstring''' import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import To...
719
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) fr...
23
0
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def snake_case__ ( __SCREAMING_SNAKE_CASE = 3 ) -> qiskit.result.counts.Counts: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise Typ...
720
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 1 for i in range(1 , num + 1 ): fact *= i return fact def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 0 while number > 0: UpperCAmelCase_ = number % ...
23
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.se...
721
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MA...
23
0
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMIN...
700
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ...
23
0
'''simple docstring''' import requests def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None: UpperCAmelCase_ = {"Content-Type": "application/json"} UpperCAmelCase_ = requests.post(__SCREAMING_SNAKE_CASE , json={"text": message_body} , ...
701
import heapq as hq import math from collections.abc import Iterator class lowerCamelCase : '''simple docstring''' def __init__( self , lowerCAmelCase ): UpperCAmelCase_ = str(id_ ) UpperCAmelCase_ = None UpperCAmelCase_ = Non...
23
0
import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self , *lowerCAmel...
702
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try...
23
0
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowerCamelCase ( unittest.TestCase ): '''simple d...
703
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING...
23
0
import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def A__ ( self ): UpperCAmelCase_ = inspec...
704
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING...
23
0
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCa...
705
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array: UpperCAmelCase_ = f'''{sampling_rate}''' UpperCAmelCase_ = "1" UpperCAmelCase...
23
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appli...
706
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class lowerCamelCase ( lowercase__, lowercase__ ): '''simple docstring''' @register_to_...
23
0
'''simple docstring''' def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: if index == number_of_items: return 0 UpperCAmelCase_ = 0 UpperCAmelCase_ = ...
707
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import ...
23
0
from bisect import bisect from itertools import accumulate def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]: UpperCAmelCase_ = sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNA...
708
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_availa...
23
0
from typing import List from .keymap import KEYMAP, get_character def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Any: def decorator(__SCREAMING_SNAKE_CASE ): UpperCAmelCase_ = getattr(__SCREAMING_SNAKE_CASE , "handle_key" , [] ) handle += [key] ...
709
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = [1] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0 UpperCAmelCase_ = ugly_nums[ia] * 2 UpperCAmelCase_ = ugly_nums[ia] * 3 UpperCAmelCase_ = ugly_nums[i...
23
0
import tensorflow as tf from ...tf_utils import shape_list class lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=1 , lowerCAmelCase=Fal...
710
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test...
23
0
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): ...
711
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDepe...
23
0
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ , UpperCAmelCase_ = len(__SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(__SCREAMING_SNAKE_CASE , __SCREAMING_SN...
712
import math def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]: UpperCAmelCase_ = [] UpperCAmelCase_ = 2 UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment UpperCAmelCase_ = [True] * (end + 1) UpperCAmelCase_ = ...
23
0
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(lowercase__ ), 'Tatoeba directory ...
713
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor impor...
23
0
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils imp...
714
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): ...
23
0
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 1 for i in range(1 , num + 1 ): fact *= i return fact def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 0 while number > 0: UpperCAmelCase_ = number % 10 sum_...
715
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "xlm-roberta-base": "https://h...
23
0
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCamelCase ( datasets.BeamBasedBuilder ): '''simple docstring''' def A__ ...
716
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE ) if decimal in (0, 1): # Exit cases for the recursion return str(__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 ) re...
23
0
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaag...
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @requi...
23
0
import re def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list: return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )] def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ = split_input(str_ ) return "".join( ["".join([char...
718
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: UpperCAmelCase_ = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAM...
23
0
'''simple docstring''' import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowerCamelCase ( unittest.TestCase ): '''si...
719
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) fr...
23
0
from __future__ import annotations def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]: UpperCAmelCase_ = 2 UpperCAmelCase_ = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(__SCREAMING_SNAKE_CASE ) if n > 1:...
720
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 1 for i in range(1 , num + 1 ): fact *= i return fact def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 0 while number > 0: UpperCAmelCase_ = number % ...
23
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE = { "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"], } try: if not is_torch_available(): raise...
721
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MA...
23
0
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> tuple: return (da...
700
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ...
23
0
'''simple docstring''' def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> bool: UpperCAmelCase_ = 0 for ch in input_str: UpperCAmelCase_ = ord(__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ = pow(2 , __SCREAMING_SNAKE_CASE ) # If we already turned on bit for curre...
701
import heapq as hq import math from collections.abc import Iterator class lowerCamelCase : '''simple docstring''' def __init__( self , lowerCAmelCase ): UpperCAmelCase_ = str(id_ ) UpperCAmelCase_ = None UpperCAmelCase_ = Non...
23
0
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import re...
702
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try...
23
0
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: UpperCAmelCase_ = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAM...
703
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING...
23
0
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableU...
704
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING...
23
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_...
705
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array: UpperCAmelCase_ = f'''{sampling_rate}''' UpperCAmelCase_ = "1" UpperCAmelCase...
23
0
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ...
706
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class lowerCamelCase ( lowercase__, lowercase__ ): '''simple docstring''' @register_to_...
23
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_AR...
707
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import ...
23
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @requ...
708
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_availa...
23
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE = { "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CO...
709
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = [1] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0 UpperCAmelCase_ = ugly_nums[ia] * 2 UpperCAmelCase_ = ugly_nums[ia] * 3 UpperCAmelCase_ = ugly_nums[i...
23
0
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str: if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) UpperCAmelCase_ = str(bin(__SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b" UpperCAmelCase_ = str(bin(__SCR...
710
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test...
23
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE = { "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], } try: if not is_torch_available(): raise OptionalDepend...
711
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDepe...
23
0
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def snake_case__ ( __SCREAMING_SNAKE_CASE = 8 ) -> str: UpperCAmelCase_ = ascii_letters + digits + punctuation return "".join(secrets.choice(__SCREAMING_SNAK...
712
import math def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]: UpperCAmelCase_ = [] UpperCAmelCase_ = 2 UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment UpperCAmelCase_ = [True] * (end + 1) UpperCAmelCase_ = ...
23
0
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def snake_case__ ( ) -> List[Any]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.ra...
713
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor impor...
23
0
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoic...
714
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): ...
23
0
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.ndarray: UpperCAmelCase_ =...
715
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "xlm-roberta-base": "https://h...
23
0
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobe...
716
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE ) if decimal in (0, 1): # Exit cases for the recursion return str(__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 ) re...
23
0
import logging from transformers.configuration_utils import PretrainedConfig SCREAMING_SNAKE_CASE = logging.getLogger(__name__) class lowerCamelCase ( lowercase__ ): '''simple docstring''' lowerCAmelCase_ : List[str] = 'masked_bert' def __i...
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @requi...
23
0
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin ...
718
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: UpperCAmelCase_ = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAM...
23
0
'''simple docstring''' # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v...
719
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) fr...
23
0
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional i...
720
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 1 for i in range(1 , num + 1 ): fact *= i return fact def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int: UpperCAmelCase_ = 0 while number > 0: UpperCAmelCase_ = number % ...
23
0
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def snake_case__ ( ) -> int: UpperCAmelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0] UpperCAmelCase_ = TensorFlow...
721
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MA...
23
0
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from tra...
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) low...
24
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inp...
24
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__...
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConf...
24
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False ...
24
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...t...
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxCo...
24
1
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def UpperCAmelCase ( A : str , A : complex , A : str = "x" , A : float = 10**-10 , A : int = 1 , ): '''simple d...
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', ...
24
1
"""simple docstring""" import math import os import sys def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = '' try: with open(A , 'rb' ) as binary_file: _UpperCAmelCase = binary_file.read() f...
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_al...
24
1
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): ...
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 1...
24
1
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: ...
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device ...
24
1
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowercase = '''<<<<<<< This should probably be modified because it mention...
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) ...
24
1
"""simple docstring""" lowercase = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' ...
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @req...
24
1
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def UpperCAmelCase ( A : int , A : int , A : Any ): ...
24
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers....
24
1
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDa...
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): ...
24
1
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_util...
24
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result ...
24
1
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase = logging.get_logger(__name__) ...
24
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCa...
24
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCas...
24
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDa...
24
1
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def UpperCAmelCase ( A : int ): '''simple docstring''' ...
24
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch fr...
24
1
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling...
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): fo...
24
1
"""simple docstring""" def UpperCAmelCase ( A : float , A : float ): '''simple docstring''' if mass < 0: raise ValueError('The mass of a body cannot be negative' ) return 0.5 * mass * abs(A ) * abs(A ) if __name__ == "__main__": ...
24
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase ...
24
1
"""simple docstring""" class lowercase__ : '''simple docstring''' def __init__( self ) -> Dict: _UpperCAmelCase = {} def lowerCamelCase_ ( self ) -> None: print(self.vertex ) ...
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, ...
24
1
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def UpperCAmelCase ( A : float , A : float , A : bool = False ): '''simple docstring''' ...
24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) ...
24
1
"""simple docstring""" import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(ch...
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta...
24
1
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...uti...
24
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( ...
24
1
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCa...
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) low...
24
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from di...
24
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__...
24
1
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def UpperCAmelCase ( *A : Any , A : Optional[Union[Dict, Any]] = None , A : str=True , A : Any=2 ): ...
24
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False ...
24
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip i...
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxCo...
24
1
"""simple docstring""" import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffuser...
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', ...
24
1
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @req...
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_al...
24
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionMo...
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 1...
24
1
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def ...
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device ...
24
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_t...
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) ...
24
1
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): fo...
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @req...
24
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_al...
24
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers....
24
1
"""simple docstring""" import pprint import requests lowercase = '''https://zenquotes.io/api''' def UpperCAmelCase ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + '/today' ).json() def UpperCAmelCase ( ): '''sim...
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): ...
24
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inp...
24
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result ...
24
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, ...
24
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCa...
24
1
"""simple docstring""" from functools import reduce lowercase = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715...
24
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDa...
24
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch fr...
24
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch fr...
24
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVeca...
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): fo...
24
1
"""simple docstring""" import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import Tokeni...
24
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase ...
24
1
"""simple docstring""" from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu,...
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, ...
24
1