| text (string, lengths 1–1.02k) | class_index (int64, values 0–1.38k) | source (string, 431 classes) |
|---|---|---|
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"):
raise ImportError(
"Loading GGUF Parameters requires `accelerate` installed in your enviroment: `pip install 'accelerate>=0.26.0'`"
)
if not ... | 24 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/gguf_quantizer.py |
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
if target_dtype != torch.uint8:
logger.info(f"target_dtype {target_dtype} is replaced by `torch.uint8` for GGUF quantization")
return torch.uint8
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "tor... | 24 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/gguf_quantizer.py |
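The two hooks in this row separate the storage dtype from the compute dtype: `adjust_target_dtype` tells accelerate to budget memory for raw `torch.uint8` storage, while `update_torch_dtype` normalizes the dtype the user requested. A minimal standalone sketch of that pattern (the `DtypeHooks` class and its float32 default are illustrative assumptions, not the diffusers API):

```python
import torch

# Minimal sketch of the dtype-hook pattern (hypothetical standalone class,
# not the diffusers API): the storage dtype used for memory planning is
# decoupled from the compute dtype the user requests.
class DtypeHooks:
    def adjust_target_dtype(self, target_dtype: torch.dtype) -> torch.dtype:
        # GGUF weights are stored as raw bytes, so plan memory as uint8.
        return torch.uint8

    def update_torch_dtype(self, torch_dtype: torch.dtype) -> torch.dtype:
        # Default the compute dtype when the user did not pass one
        # (float32 here is an arbitrary illustrative default).
        return torch.float32 if torch_dtype is None else torch_dtype

hooks = DtypeHooks()
assert hooks.adjust_target_dtype(torch.float16) is torch.uint8
assert hooks.update_torch_dtype(None) is torch.float32
```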
inferred_shape = _quant_shape_from_byte_shape(loaded_param_shape, type_size, block_size)
if inferred_shape != current_param_shape:
raise ValueError(
f"{param_name} has an expected quantized shape of: {inferred_shape}, but receieved shape: {loaded_param_shape}"
)
... | 24 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/gguf_quantizer.py |
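The shape check relies on GGUF's block layout: every `block_size` logical values are packed into `type_size` bytes, so the logical width can be recovered from the byte width. A hedged re-derivation of what `_quant_shape_from_byte_shape` presumably computes (the Q4_0 constants come from the GGUF spec; the helper name and error text here are illustrative):

```python
# Sketch of recovering a logical tensor shape from a GGUF byte shape.
# Q4_0 packs a block of 32 4-bit values plus one fp16 scale into 18 bytes.
Q4_0_BLOCK_SIZE, Q4_0_TYPE_SIZE = 32, 18

def quant_shape_from_byte_shape(byte_shape, type_size, block_size):
    *leading, last = byte_shape
    if last % type_size != 0:
        raise ValueError(f"byte width {last} is not a multiple of type_size {type_size}")
    return (*leading, last // type_size * block_size)

# A tensor stored as (4096, 2304) bytes holds 4096 x 4096 logical Q4_0 weights.
assert quant_shape_from_byte_shape((4096, 2304), Q4_0_TYPE_SIZE, Q4_0_BLOCK_SIZE) == (4096, 4096)
```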
def create_quantized_param(
self,
model: "ModelMixin",
param_value: Union["GGUFParameter", "torch.Tensor"],
param_name: str,
target_device: "torch.device",
state_dict: Optional[Dict[str, Any]] = None,
unexpected_keys: Optional[List[str]] = None,
):
mod... | 24 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/gguf_quantizer.py |
def _process_model_before_weight_loading(
self,
model: "ModelMixin",
device_map,
keep_in_fp32_modules: List[str] = [],
**kwargs,
):
state_dict = kwargs.get("state_dict", None)
self.modules_to_not_convert.extend(keep_in_fp32_modules)
self.modules_to_no... | 24 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/gguf_quantizer.py |
def _dequantize(self, model):
is_model_on_cpu = model.device.type == "cpu"
if is_model_on_cpu:
logger.info(
"Model was found to be on CPU (could happen as a result of `enable_model_cpu_offload()`). So, moving it to GPU. After dequantization, will move the model back to CPU ag... | 24 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/gguf_quantizer.py |
class GGUFParameter(torch.nn.Parameter):
def __new__(cls, data, requires_grad=False, quant_type=None):
data = data if data is not None else torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.quant_type = quant_type
return self
def as_tensor(self):
... | 25 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/utils.py |
# When converting from original format checkpoints we often use splits, cats etc on tensors
# this method ensures that the returned tensor type from those operations remains GGUFParameter
# so that we preserve quant_type information
quant_type = None
for arg in args:
if isins... | 25 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/utils.py |
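`__torch_function__` is the standard hook for keeping a tensor subclass "sticky" across torch ops such as `torch.cat` and `torch.split`. A self-contained sketch of the same idea (a plain `torch.Tensor` subclass carrying a generic `tag`; the real `GGUFParameter` is an `nn.Parameter` subclass and handles more argument shapes):

```python
import torch

class TaggedTensor(torch.Tensor):
    # Tensor subclass that carries a metadata tag through torch ops.
    def __new__(cls, data, tag=None):
        self = torch.Tensor._make_subclass(cls, data)
        self.tag = tag
        return self

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        out = super().__torch_function__(func, types, args, kwargs)

        def find_tag(xs):
            # torch.cat receives a *sequence* of tensors, so recurse into it.
            for x in xs:
                if isinstance(x, TaggedTensor):
                    return getattr(x, "tag", None)
                if isinstance(x, (list, tuple)):
                    tag = find_tag(x)
                    if tag is not None:
                        return tag
            return None

        # Re-wrap tensor results so the tag survives splits, cats, etc.
        if isinstance(out, torch.Tensor):
            return cls(out, tag=find_tag(args))
        return out

a = TaggedTensor(torch.ones(2, 2), tag="Q4_0")
b = torch.cat([a, a])
assert isinstance(b, TaggedTensor) and b.tag == "Q4_0"
```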
class GGUFLinear(nn.Linear):
def __init__(
self,
in_features,
out_features,
bias=False,
compute_dtype=None,
device=None,
) -> None:
super().__init__(in_features, out_features, bias, device)
self.compute_dtype = compute_dtype
def forward(self, ... | 26 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/utils.py |
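The truncated `forward` above presumably dequantizes the GGUF weight on the fly and runs a standard linear in `compute_dtype`. A hedged sketch of that design, not the diffusers implementation (`DequantLinear` and `dequantize_fn` are made-up names; the real kernel does GGUF block dequantization rather than a dtype cast):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class DequantLinear(nn.Linear):
    # Sketch of an on-the-fly dequantizing linear layer. `dequantize_fn`
    # stands in for the real GGUF dequantization kernel; here it is a
    # plain dtype cast so the example runs as-is.
    def __init__(self, in_features, out_features, bias=True,
                 compute_dtype=torch.bfloat16,
                 dequantize_fn=lambda w, dtype: w.to(dtype)):
        super().__init__(in_features, out_features, bias)
        self.compute_dtype = compute_dtype
        self.dequantize_fn = dequantize_fn

    def forward(self, x):
        # Dequantize the stored weight, then run the matmul in compute_dtype.
        weight = self.dequantize_fn(self.weight, self.compute_dtype)
        bias = self.bias.to(self.compute_dtype) if self.bias is not None else None
        return F.linear(x.to(self.compute_dtype), weight, bias)

layer = DequantLinear(8, 4)
out = layer(torch.randn(2, 8))
assert out.dtype == torch.bfloat16 and out.shape == (2, 4)
```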
class TorchAoHfQuantizer(DiffusersQuantizer):
r"""
Diffusers Quantizer for TorchAO: https://github.com/pytorch/ao/.
"""
requires_calibration = False
required_packages = ["torchao"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
d... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
device_map = kwargs.get("device_map", None)
if isinstance(device_map, dict):
if "cpu" in device_map.values() or "disk" in device_map.values():
if self.pre_quantized:
raise ValueError(
"You are attempting to perform cpu/disk offload with a p... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
if self.pre_quantized:
weights_only = kwargs.get("weights_only", None)
if weights_only:
torch_version = version.parse(importlib.metadata.version("torch"))
if torch_version < version.parse("2.5.0"):
# TODO(aryan): TorchAO is compatible with Pyto... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
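The snippet gates `weights_only` loading on the installed torch version, since older torch cannot deserialize torchao's tensor subclasses under `weights_only=True`. A generic sketch of that version gate (the 2.5.0 cutoff is inferred from the snippet's context and should be verified against torchao's docs):

```python
import importlib.metadata

from packaging import version

def torch_supports_weights_only_torchao() -> bool:
    # torch >= 2.5 can deserialize torchao tensor subclasses under
    # weights_only=True; the 2.5.0 cutoff is an assumption taken from
    # the snippet above.
    torch_version = version.parse(importlib.metadata.version("torch"))
    return torch_version >= version.parse("2.5.0")

print(torch_supports_weights_only_torchao())
```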
if quant_type.startswith("int") or quant_type.startswith("uint"):
if torch_dtype is not None and torch_dtype != torch.bfloat16:
logger.warning(
f"You are trying to set torch_dtype to {torch_dtype} for int4/int8/uintx quantization, but "
f"only bfloat16... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
quant_type = self.quantization_config.quant_type
if quant_type.startswith("int8") or quant_type.startswith("int4"):
# Note that int4 weights are created by packing into torch.int8, but since there is no torch.int4,... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
if isinstance(target_dtype, SUPPORTED_TORCH_DTYPES_FOR_QUANTIZATION):
return target_dtype
# We need one of the supported dtypes to be selected in order for accelerate to determine
# the total size of modules/parameters for auto device placement.
possible_device_maps = ["auto", "bala... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
def check_if_quantized_param(
self,
model: "ModelMixin",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
param_device = kwargs.pop("param_device", None)
# Check if the param_name is not in self.modules_to_... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
def create_quantized_param(
self,
model: "ModelMixin",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: List[str],
):
r"""
Each nn.Linear layer that needs to be quantized ... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
if self.pre_quantized:
# If we're loading pre-quantized weights, replace the repr of linear layers for pretty printing info
# about AffineQuantizedTensor
module._parameters[tensor_name] = torch.nn.Parameter(param_value.to(device=target_device))
if isinstance(module, nn.Li... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
if not isinstance(self.modules_to_not_convert, list):
self.modules_to_not_convert = [self.modules_to_not_convert]
self.modules_to_not_convert.extend(keep_in_fp32_modules)
# Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
if isinstan... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
def _process_model_after_weight_loading(self, model: "ModelMixin"):
return model
def is_serializable(self, safe_serialization=None):
# TODO(aryan): needs to be tested
if safe_serialization:
logger.warning(
"torchao quantized model does not support safe serializat... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
if self.offload and self.quantization_config.modules_to_not_convert is None:
logger.warning(
"The model contains offloaded modules and these modules are not quantized. We don't recommend saving the model as we won't be able to reload them."
"If you want to specify modules to ... | 27 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/torchao/torchao_quantizer.py |
class BnB4BitDiffusersQuantizer(DiffusersQuantizer):
"""
    4-bit quantization from the bitsandbytes quantization method:
    before loading: converts transformer layers into Linear4bit
    during loading: load 16bit weight and pass to the layer object
    after: quantizes individual weights in Linear4bit into ... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
def validate_environment(self, *args, **kwargs):
if not torch.cuda.is_available():
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"):
raise ImportError(
"Using `bitsandbyt... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
device_map = kwargs.get("device_map", None)
if (
device_map is not None
and isinstance(device_map, dict)
and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
):
device_map_without_no_convert = {
key: device_map[key] for key in ... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
"https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu "
"for more details. "
) | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
if target_dtype != torch.int8:
from accelerate.utils import CustomDtype
logger.info("target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization")
return CustomDtype.INT... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit):
# Add here check for loaded components' dtypes once serialization is implemented
return True
elif isinstance(module, bnb.nn.Linear4bit) and t... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
if tensor_name not in module._parameters:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
old_value = getattr(module, tensor_name)
if tensor_name == "bias":
if param_value is None:
new_value = old_value.to(target_device)
... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit):
raise ValueError("this function only loads `Linear4bit components`")
if (
old_value.device == torch.device("meta")
and target_device not in ["meta", torch.device("meta")]
and param_value is Non... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
if not self.is_serializable:
raise ValueError(
"Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
)
... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
quantized_stats = {}
for k, v in state_dict.items():
                # `startswith` to account for edge cases where the `param_name`
# substring can be present in multiple places in the `state_dict`
if param_name + "." in k and k.startswith(param_name):
quanti... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
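The prefix check collects the auxiliary tensors that bitsandbytes serializes alongside each 4-bit weight without catching unrelated keys that merely contain the substring. A simplified sketch (the `absmax`/`quant_map` key names are illustrative, and the single `startswith(param_name + ".")` predicate covers the same cases as the two-condition check in the snippet):

```python
# Simplified sketch: collect the auxiliary quantization tensors stored
# next to a 4-bit weight by matching on the key prefix.
state_dict = {
    "blocks.0.attn.to_q.weight": b"packed uint8 data",
    "blocks.0.attn.to_q.weight.absmax": b"per-block scales",
    "blocks.0.attn.to_q.weight.quant_map": b"codebook",
    "blocks.10.attn.to_q.weight": b"unrelated parameter",
}
param_name = "blocks.0.attn.to_q.weight"
quantized_stats = {k: v for k, v in state_dict.items() if k.startswith(param_name + ".")}
assert set(quantized_stats) == {
    "blocks.0.attn.to_q.weight.absmax",
    "blocks.0.attn.to_q.weight.quant_map",
}
```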
def check_quantized_param_shape(self, param_name, current_param, loaded_param):
current_param_shape = current_param.shape
loaded_param_shape = loaded_param.shape
n = current_param_shape.numel()
inferred_shape = (n,) if "bias" in param_name else ((n + 1) // 2, 1)
if loaded_param_... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
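The expected-shape formula encodes how bitsandbytes stores 4-bit weights: the tensor is flattened and packed two 4-bit values per byte, giving `((numel + 1) // 2, 1)`, while biases stay unpacked. A small worked check of that arithmetic (the function name is a stand-in for the method above):

```python
from math import prod

# bnb-style 4-bit packing: a weight with n elements is stored as a
# ((n + 1) // 2, 1) uint8 buffer (two 4-bit values per byte); biases
# are only flattened, never packed.
def expected_packed_shape(param_name, current_shape):
    n = prod(current_shape)
    return (n,) if "bias" in param_name else ((n + 1) // 2, 1)

assert expected_packed_shape("proj.weight", (320, 4, 3, 3)) == (5760, 1)
assert expected_packed_shape("proj.weight", (7,)) == (4, 1)  # odd counts round up
assert expected_packed_shape("proj.bias", (320,)) == (320,)
```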
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
            # We force the `dtype` to be float16; this is a requirement of `bitsandbytes`
logger.info(
"Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# (sayakpaul): I think it could be better to disable custom `device_map`s
# for the first phase of the integration in the interest of simplicity.
# Commenting this for discussions on the PR.
# def update_device_map(self, device_map):
# if device_map is None:
# device_map = {"": torch.cud... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# We may keep some modules such as the `proj_out` in their original dtype for numerical stability reasons
self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
if not isinstance(self.modules_to_not_convert, list):
self.modules_to_not_convert = [self.modules_to_not_con... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
raise ValueError(
"If you want to offload some keys to `cpu` or `disk`, you need to set "
"`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
" converted... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
model = replace_with_bnb_linear(
model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "ModelMixin", **kwargs):
mo... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
is_model_on_cpu = model.device.type == "cpu"
if is_model_on_cpu:
logger.info(
"Model was found to be on CPU (could happen as a result of `enable_model_cpu_offload()`). So, moving it to GPU. After dequantization, will move the model back to CPU again to preserve the previous device."
... | 28 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
class BnB8BitDiffusersQuantizer(DiffusersQuantizer):
"""
8-bit quantization from bitsandbytes quantization method:
    before loading: converts transformer layers into Linear8bitLt
    during loading: load 16bit weight and pass to the layer object
    after: quantizes individual weights in Linear8bitLt into... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
def validate_environment(self, *args, **kwargs):
if not torch.cuda.is_available():
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"):
raise ImportError(
"Using `bitsandbyt... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
device_map = kwargs.get("device_map", None)
if (
device_map is not None
and isinstance(device_map, dict)
and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
):
device_map_without_no_convert = {
key: device_map[key] for key in ... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
"https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu "
"for more details. "
) | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.adjust_max_memory
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
# need more space for buffers that are created during quantization
max_memory = {key: val * 0.9... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.update_torch_dtype
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
            # We force the `dtype` to be float16; this is a requirement of `bitsandbytes`
... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.update_device_map
# def update_device_map(self, device_map):
# if device_map is None:
# device_map = {"": torch.cuda.current_device()}
# logger.info(
# "The device_map was not in... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
def check_if_quantized_param(
self,
model: "ModelMixin",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
):
import bitsandbytes as bnb
module, tensor_name = get_module_from_name(model, param_name)
if isinsta... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
def create_quantized_param(
self,
model: "ModelMixin",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: Optional[List[str]] = None,
):
import bitsandbytes as bnb
fp16_sta... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params):
raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.")
if (
old_value.device == torch.device("meta")
and target_device not in ["meta", torch.device("meta")]
... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
module._parameters[tensor_name] = new_value
if fp16_statistics is not None:
setattr(module.weight, "SCB", fp16_statistics.to(target_device))
if unexpected_keys is not None:
unexpected_keys.remove(fp16_statistics_key)
# We just need to pop the `weight_format` keys... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer._process_model_before_weight_loading
def _process_model_before_weight_loading(
self,
model: "ModelMixin",
device_map,
keep_in_fp32_modules: List[str] = [],
**kwargs,
):
from .u... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
if isinstance(device_map, dict) and len(device_map.keys()) > 1:
keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
if len(keys_on_cpu) > 0 and not load_i... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
# Purge `None`.
# Unlike `transformers`, we don't know if we should always keep certain modules in FP32
# in case of diffusion transformer models. For language models and others alike, `lm_head`
# and tied modules are usually kept in FP32.
self.modules_to_not_convert = [module for module... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
@property
    # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.is_trainable
def is_trainable(self) -> bool:
# Because we're mandating `bitsandbytes` 0.43.3.
return True
def _dequantize(self, model):
from .utils import dequantize_and_replace
... | 29 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py |
class FlaxImagePipelineOutput(BaseOutput):
"""
Output class for image pipelines.
Args:
images (`List[PIL.Image.Image]` or `np.ndarray`)
List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
num_channels)`.
"""
images... | 30 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
r"""
Base class for Flax-based pipelines.
[`FlaxDiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and
provides methods for loading, downloading and saving models. It also includes methods to:
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
# check if the module is a pipeline module
pipeline_dir = module.__module__.split(".")[-2]
path = module.__module__.split(".")
is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)
# if library is not in LOADABLE_CLASSES, then it ... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
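The pipeline-module check derives the candidate subpackage name from the component's dotted module path. A toy illustration of the same path arithmetic (the module string is an example; the real check also requires `hasattr(pipelines, pipeline_dir)`):

```python
# Example: a component defined in a diffusers pipeline subpackage.
module_path = "diffusers.pipelines.stable_diffusion.pipeline_flax_stable_diffusion"
path = module_path.split(".")
pipeline_dir = path[-2]                    # "stable_diffusion"
is_pipeline_module = pipeline_dir in path  # plus hasattr(pipelines, pipeline_dir) in the real code
assert pipeline_dir == "stable_diffusion" and is_pipeline_module
```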
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
params: Union[Dict, FrozenDict],
push_to_hub: bool = False,
**kwargs,
):
# TODO: handle inference_state
"""
Save all saveable variables of the pipeline to a directory. A pipeline varia... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to which to save. Will be created if it doesn't exist.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can spe... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
private = kwargs.pop("private", None)
create_pr = kwargs.pop("create_pr", False)
token = kwargs.pop("token", None)
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
save_method_name = None
# search for the model's base class in LOADABLE_CLASSES
for library_name, library_classes in LOADABLE_CLASSES.items():
library = importlib.import_module(library_name)
for base_class, save_load_methods in library_classes.items():
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
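`save_pretrained` picks the right save method by walking `LOADABLE_CLASSES`, a mapping from libraries to base classes and their save/load method names, and matching each component against the base classes. A self-contained sketch of that lookup (the classes and mapping below are stand-ins; the real mapping keys on library names like `"diffusers"` and `"transformers"` and resolves the classes via `importlib`):

```python
# Self-contained sketch of the LOADABLE_CLASSES lookup: map base classes
# to (save, load) method names and return the save method matching a
# component, or None if nothing matches.
class FlaxModelMixin: ...
class FlaxSchedulerMixin: ...

LOADABLE_CLASSES = {
    FlaxModelMixin: ("save_pretrained", "from_pretrained"),
    FlaxSchedulerMixin: ("save_pretrained", "from_pretrained"),
}

def find_save_method_name(sub_model):
    for base_class, (save_method, _load_method) in LOADABLE_CLASSES.items():
        if isinstance(sub_model, base_class):
            return save_method
    return None

class MyFlaxModel(FlaxModelMixin): ...

assert find_save_method_name(MyFlaxModel()) == "save_pretrained"
assert find_save_method_name(object()) is None
```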
if expects_params:
save_method(
os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name]
)
else:
save_method(os.path.join(save_directory, pipeline_component_name))
if push_to_hub:
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
If you get the error message below, you need to finetune the weights for your downstream task:
```
Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
`... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
- A string, the *repo id* (for example `stable-diffusion-v1-5/stable-diffusion-v1-5`) of a
pretrained pipeline hosted on the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
using [`~FlaxDiffusionPipeli... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`)... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
mirror (`str`, *optional*):
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
guarantee the tim... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
<Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
`huggingface-cli login`.
</Tip>
Examples:
```py
>>> from diffusers import FlaxDiffusionPipeline
>>> # Download pipeline from huggingface.co and cach... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
>>> model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
>>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
... model_id,
... subfolder="scheduler",
... )
>>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained(
... m... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
# 1. Download the checkpoints and configs
# use snapshot download here to get it working from from_pretrained
if not os.path.isdir(pretrained_model_name_or_path):
config_dict = cls.load_config(
pretrained_model_name_or_path,
cache_dir=cache_dir,
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
if cls != FlaxDiffusionPipeline:
requested_pipeline_class = cls.__name__
else:
requested_pipeline_class = config_dict.get("_class_name", cls.__name__)
requested_pipeline_class = (
requested_pipeline_class
if requested_pi... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
# download all allow_patterns
cached_folder = snapshot_download(
pretrained_model_name_or_path,
cache_dir=cache_dir,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
# 2. Load the pipeline class, if using custom module then load it from the hub
# if we load from explicit class, let's use it
if cls != FlaxDiffusionPipeline:
pipeline_class = cls
else:
diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
c... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)
# define init kwargs
init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict}
init_kwargs = {**init_kwargs, **passed_pipe_kwargs}
# remove `null` components
def load_modu... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
# 3. Load each module in the pipeline
for name, (library_name, class_name) in init_dict.items():
if class_name is None:
# edge case for when the pipeline was saved with safety_checker=None
init_kwargs[name] = None
continue
is_pipeline_modu... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
expected_class_obj = None
for class_name, class_candidate in class_candidates.items():
if class_candidate is not None and issubclass(class_obj, class_candidate):
expected_class_obj = class_candidate | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
if not issubclass(passed_class_obj[name].__class__, expected_class_obj):
raise ValueError(
f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be"
f" {expected_class_obj}"
)
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
# set passed class object
loaded_sub_model = passed_class_obj[name]
elif is_pipeline_module:
pipeline_module = getattr(pipelines, library_name)
class_obj = import_flax_or_no_model(pipeline_module, class_name)
importable_classes = ALL_IMPORTABL... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
if loaded_sub_model is None and sub_model_should_be_defined:
load_method_name = None
for class_name, class_candidate in class_candidates.items():
if class_candidate is not None and issubclass(class_obj, class_candidate):
load_method_name = impo... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
if issubclass(class_obj, FlaxModelMixin):
loaded_sub_model, loaded_params = load_method(
loadable_folder,
from_pt=from_pt,
use_memory_efficient_attention=use_memory_efficient_attention,
split_head_dim=spl... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
loaded_sub_model, loaded_params = load_method(loadable_folder, _do_init=False)
params[name] = loaded_params
elif issubclass(class_obj, FlaxSchedulerMixin):
loaded_sub_model, scheduler_state = load_method(loadable_folder)
params[name] = schedule... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)
# 4. Potentially add passed objects if expected
missing_modules = set(expected_modules) - set(init_kwargs.keys())
passed_modules = list(passed_class_obj.keys())
if len(missing_modules) > 0 and missing_modules <... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
@classmethod
def _get_signature_keys(cls, obj):
parameters = inspect.signature(obj.__init__).parameters
required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
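`_get_signature_keys` splits a pipeline's `__init__` parameters into required and optional sets by checking for default values. The same inspect pattern in isolation (`ExamplePipeline` is made up):

```python
import inspect

class ExamplePipeline:
    def __init__(self, unet, scheduler, safety_checker=None):
        pass

params = inspect.signature(ExamplePipeline.__init__).parameters
required = {k for k, v in params.items() if v.default is inspect.Parameter.empty} - {"self"}
optional = {k for k, v in params.items() if v.default is not inspect.Parameter.empty}
assert required == {"unet", "scheduler"}
assert optional == {"safety_checker"}
```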
>>> text2img = FlaxStableDiffusionPipeline.from_pretrained(
... "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16
... )
>>> img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components)
```
Returns:
A dictionary containing al... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
@staticmethod
def numpy_to_pil(images):
"""
Convert a NumPy image or a batch of images to a PIL image.
"""
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype("uint8")
if images.shape[-1] == 1:
# special case ... | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
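`numpy_to_pil` expects float images in `[0, 1]` with a trailing channel axis and promotes a single `(H, W, C)` image to a batch. A usage sketch of the same conversion outside the pipeline (random data stands in for decoded outputs):

```python
import numpy as np
from PIL import Image

# Usage sketch: a batch of float images in [0, 1], shape (N, H, W, C),
# standing in for decoded pipeline outputs.
images = np.random.rand(2, 64, 64, 3).astype("float32")
batch = (images * 255).round().astype("uint8")
pil_images = [Image.fromarray(img) for img in batch]
assert all(isinstance(im, Image.Image) for im in pil_images)
```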
return tqdm(iterable, **self._progress_bar_config)
def set_progress_bar_config(self, **kwargs):
self._progress_bar_config = kwargs | 31 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pipeline_flax_utils.py |
class AutoPipelineForText2Image(ConfigMixin):
r"""
[`AutoPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The
specific underlying pipeline class is automatically selected from either the
[`~AutoPipelineForText2Image.from_pretrained`] or [`~AutoPipeli... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
@classmethod
@validate_hf_hub_args
def from_pretrained(cls, pretrained_model_or_path, **kwargs):
r"""
        Instantiates a text-to-image PyTorch diffusion pipeline from pretrained pipeline weights.
The from_pretrained() method takes care of returning the correct pipeline class instance by:
... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
```
Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in ... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
- A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
hosted on the Hub.
- A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
saved using
[`~DiffusionP... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used. | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`)... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
custom_revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
A map that specifies where each submodule should go. It doesn’t need to be defined for each
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
same device. | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
max_memory (`Dict`... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
when there is some disk offload.
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
tries to not use more than 1x model size ... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
                Can be used to overwrite the loadable and saveable variables (the pipeline components of the specific pipeline
class). The overwritten components are passed directly to the pipelines `__init__` method. See example
below for more information.
variant (`str`, *optional*):
... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
<Tip>
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
`huggingface-cli login`.
</Tip>
Examples:
```py
>>> from diffusers import AutoPipelineForText2Image
>>> pipeline = AutoPipelineForText2Image.from_pr... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
load_config_kwargs = {
"cache_dir": cache_dir,
"force_download": force_download,
"proxies": proxies,
"token": token,
"local_files_only": local_files_only,
"revision": revision,
}
config = cls.load_config(pretrained_model_or_path, *... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
if "controlnet" in kwargs:
if isinstance(kwargs["controlnet"], ControlNetUnionModel):
orig_class_name = config["_class_name"].replace(to_replace, "ControlNetUnionPipeline")
else:
orig_class_name = config["_class_name"].replace(to_replace, "ControlNetPipeline")
... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
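The controlnet branch rewrites the stored `_class_name` into its ControlNet counterpart by substring replacement before looking the name up in the task mapping. A toy check of that rewrite (assuming `to_replace` is `"Pipeline"`, as in the plain text-to-image case):

```python
# Rewrite a pipeline class name to its ControlNet variant by substring
# replacement, as the snippet above does with config["_class_name"].
orig_class_name = "StableDiffusionXLPipeline"
to_replace = "Pipeline"
assert orig_class_name.replace(to_replace, "ControlNetPipeline") == "StableDiffusionXLControlNetPipeline"
assert orig_class_name.replace(to_replace, "ControlNetUnionPipeline") == (
    "StableDiffusionXLControlNetUnionPipeline"
)
```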
The from_pipe() method takes care of returning the correct pipeline class instance by finding the text-to-image
pipeline linked to the pipeline class using pattern matching on pipeline class name.
All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |
original_config = dict(pipeline.config)
original_cls_name = pipeline.__class__.__name__
# derive the pipeline class to instantiate
text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, original_cls_name)
if "controlnet" in kwargs:
if kwargs["controlnet"] is ... | 32 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/auto_pipeline.py |