// classes.dot — class diagram of the peft.tuners.lora package (repository forked from huggingface/peft)
digraph "classes" {
rankdir=BT
charset="utf-8"
"peft.tuners.lora.aqlm.AqlmLoraLinear" [color="black", fontcolor="black", label=<{AqlmLoraLinear|<br ALIGN="LEFT"/>|forward(x: torch.Tensor)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.awq.AwqLoraLinear" [color="black", fontcolor="black", label=<{AwqLoraLinear|quant_linear_module<br ALIGN="LEFT"/>|forward(x: torch.Tensor)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.layer.Conv2d" [color="black", fontcolor="black", label=<{Conv2d|conv_fn : NoneType<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.layer.Conv3d" [color="black", fontcolor="black", label=<{Conv3d|conv_fn : NoneType<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.dora.DoraConv2dLayer" [color="black", fontcolor="black", label=<{DoraConv2dLayer|conv_fn : NoneType<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.dora.DoraConv3dLayer" [color="black", fontcolor="black", label=<{DoraConv3dLayer|conv_fn : NoneType<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.dora.DoraEmbeddingLayer" [color="black", fontcolor="black", label=<{DoraEmbeddingLayer|<br ALIGN="LEFT"/>|forward(x)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.dora.DoraLinearLayer" [color="black", fontcolor="black", label=<{DoraLinearLayer|fan_in_fan_out<br ALIGN="LEFT"/>weight : Parameter<br ALIGN="LEFT"/>|forward(x)<br ALIGN="LEFT"/>get_weight_norm(weight, lora_weight, scaling): torch.Tensor<br ALIGN="LEFT"/>update_layer(): None<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.eetq.EetqLoraLinear" [color="black", fontcolor="black", label=<{EetqLoraLinear|quant_linear_module<br ALIGN="LEFT"/>|forward(x: torch.Tensor)<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[List[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.layer.Embedding" [color="black", fontcolor="black", label=<{Embedding|adapter_layer_names<br ALIGN="LEFT"/>|dora_init(adapter_name: str): None<br ALIGN="LEFT"/>forward(x: torch.Tensor): torch.Tensor<br ALIGN="LEFT"/>get_delta_weight(adapter): torch.Tensor<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, lora_bias)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.config.EvaConfig" [color="black", fontcolor="black", label=<{EvaConfig|adjust_scaling_factors : bool<br ALIGN="LEFT"/>label_mask_value : int<br ALIGN="LEFT"/>rho : float<br ALIGN="LEFT"/>tau : float<br ALIGN="LEFT"/>use_label_mask : bool<br ALIGN="LEFT"/>whiten : bool<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.eva.HashHook" [color="black", fontcolor="black", label=<{HashHook|hashed_inputs : list<br ALIGN="LEFT"/>model_input<br ALIGN="LEFT"/>|hash_fn(tensor)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.hqq.HqqLoraLinear" [color="black", fontcolor="black", label=<{HqqLoraLinear|base_layer<br ALIGN="LEFT"/>fan_in_fan_out : bool<br ALIGN="LEFT"/>|forward(x: torch.Tensor): torch.Tensor<br ALIGN="LEFT"/>get_delta_weight(adapter)<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.layer.Linear" [color="black", fontcolor="black", label=<{Linear|fan_in_fan_out : bool<br ALIGN="LEFT"/>is_target_conv_1d_layer : bool<br ALIGN="LEFT"/>|forward(x: torch.Tensor): torch.Tensor<br ALIGN="LEFT"/>get_delta_weight(adapter): torch.Tensor<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.bnb.Linear4bit" [color="black", fontcolor="black", label=<{Linear4bit|fan_in_fan_out : bool<br ALIGN="LEFT"/>weight : Params4bit<br ALIGN="LEFT"/>|forward(x: torch.Tensor): torch.Tensor<br ALIGN="LEFT"/>get_delta_weight(adapter)<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.bnb.Linear8bitLt" [color="black", fontcolor="black", label=<{Linear8bitLt|fan_in_fan_out : bool<br ALIGN="LEFT"/>weight : Int8Params<br ALIGN="LEFT"/>|forward(x: torch.Tensor): torch.Tensor<br ALIGN="LEFT"/>get_delta_weight(adapter)<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.config.LoftQConfig" [color="black", fontcolor="black", label=<{LoftQConfig|loftq_bits : int<br ALIGN="LEFT"/>loftq_iter : int<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.config.LoraConfig" [color="black", fontcolor="black", label=<{LoraConfig|alpha_pattern : Optional[dict]<br ALIGN="LEFT"/>bias : Literal['none', 'all', 'lora_only']<br ALIGN="LEFT"/>eva_config : Optional[EvaConfig]<br ALIGN="LEFT"/>exclude_modules : Optional[Union[list[str], str]]<br ALIGN="LEFT"/>fan_in_fan_out : bool<br ALIGN="LEFT"/>init_lora_weights : bool \| Literal['gaussian', 'eva', 'olora', 'pissa', 'pissa_niter_[number of iters]', 'loftq']<br ALIGN="LEFT"/>layer_replication : Optional[list[tuple[int, int]]]<br ALIGN="LEFT"/>layers_pattern : Optional[Union[list[str], str]]<br ALIGN="LEFT"/>layers_to_transform : Optional[Union[list[int], int]]<br ALIGN="LEFT"/>loftq_config : Union[LoftQConfig, dict]<br ALIGN="LEFT"/>lora_alpha : int<br ALIGN="LEFT"/>lora_bias : bool<br ALIGN="LEFT"/>lora_dropout : float<br ALIGN="LEFT"/>megatron_config : Optional[dict]<br ALIGN="LEFT"/>megatron_core : Optional[str]<br ALIGN="LEFT"/>modules_to_save : Optional[list[str]]<br ALIGN="LEFT"/>peft_type : LORA<br ALIGN="LEFT"/>r : int<br ALIGN="LEFT"/>rank_pattern : Optional[dict]<br ALIGN="LEFT"/>runtime_config<br ALIGN="LEFT"/>target_modules : Optional[Union[list[str], str]]<br ALIGN="LEFT"/>use_dora : bool<br ALIGN="LEFT"/>use_rslora : bool<br ALIGN="LEFT"/>|to_dict()<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.layer.LoraLayer" [color="black", fontcolor="black", label=<{LoraLayer|adapter_layer_names : tuple<br ALIGN="LEFT"/>base_layer : Module<br ALIGN="LEFT"/>ephemeral_gpu_offload : bool<br ALIGN="LEFT"/>in_features<br ALIGN="LEFT"/>kwargs : dict<br ALIGN="LEFT"/>lora_A : ModuleDict<br ALIGN="LEFT"/>lora_B : ModuleDict<br ALIGN="LEFT"/>lora_alpha : dict<br ALIGN="LEFT"/>lora_bias : dict[str, bool]<br ALIGN="LEFT"/>lora_dropout : ModuleDict<br ALIGN="LEFT"/>lora_embedding_A : ParameterDict<br ALIGN="LEFT"/>lora_embedding_B : ParameterDict<br ALIGN="LEFT"/>lora_magnitude_vector : ModuleDict<br ALIGN="LEFT"/>merged_adapters : list<br ALIGN="LEFT"/>other_param_names : tuple<br ALIGN="LEFT"/>out_features : NoneType<br ALIGN="LEFT"/>r : dict<br ALIGN="LEFT"/>scaling : dict<br ALIGN="LEFT"/>use_dora : dict[str, bool]<br ALIGN="LEFT"/>weight<br ALIGN="LEFT"/>|dora_init(adapter_name: str): None<br ALIGN="LEFT"/>loftq_init(adapter_name)<br ALIGN="LEFT"/>olora_init(adapter_name)<br ALIGN="LEFT"/>pissa_init(adapter_name, init_lora_weights)<br ALIGN="LEFT"/>reset_lora_parameters(adapter_name, init_lora_weights)<br ALIGN="LEFT"/>scale_layer(scale: float): None<br ALIGN="LEFT"/>set_scale(adapter, scale)<br ALIGN="LEFT"/>unscale_layer(scale): None<br ALIGN="LEFT"/>update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool, lora_bias: bool)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.model.LoraModel" [color="black", fontcolor="black", label=<{LoraModel|active_adapter : str \| list[str]<br ALIGN="LEFT"/>prefix : str<br ALIGN="LEFT"/>|add_weighted_adapter(adapters: list[str], weights: list[float], adapter_name: str, combination_type: str, svd_rank: int \| None, svd_clamp: int \| None, svd_full_matrices: bool, svd_driver: str \| None, density: float \| None, majority_sign_method: Literal['total', 'frequency']): None<br ALIGN="LEFT"/>delete_adapter(adapter_name: str): None<br ALIGN="LEFT"/>disable_adapter_layers(): None<br ALIGN="LEFT"/>enable_adapter_layers(): None<br ALIGN="LEFT"/>get_peft_config_as_dict(inference: bool)<br ALIGN="LEFT"/>merge_and_unload(progressbar: bool, safe_merge: bool, adapter_names: Optional[list[str]]): torch.nn.Module<br ALIGN="LEFT"/>set_adapter(adapter_name: str \| list[str]): None<br ALIGN="LEFT"/>subtract_mutated_init(output_state_dict: dict[str, torch.Tensor], adapter_name: str, kwargs)<br ALIGN="LEFT"/>unload(): torch.nn.Module<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.tp_layer.LoraParallelLinear" [color="black", fontcolor="black", label=<{LoraParallelLinear|backend<br ALIGN="LEFT"/>fan_in_fan_out : bool<br ALIGN="LEFT"/>is_parallel_a<br ALIGN="LEFT"/>is_target_conv_1d_layer : bool<br ALIGN="LEFT"/>|forward(x: torch.Tensor)<br ALIGN="LEFT"/>get_delta_weight(adapter): torch.Tensor<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, init_method, input_is_parallel, gather_output)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.config.LoraRuntimeConfig" [color="black", fontcolor="black", label=<{LoraRuntimeConfig|ephemeral_gpu_offload : bool<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.gptq.QuantLinear" [color="black", fontcolor="black", label=<{QuantLinear|quant_linear_module<br ALIGN="LEFT"/>|forward(x: torch.Tensor)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.eva.SVDHook" [color="black", fontcolor="black", label=<{SVDHook|converged<br ALIGN="LEFT"/>model_input : NoneType<br ALIGN="LEFT"/>n_components : int<br ALIGN="LEFT"/>sim_thresh : Union[float, torch.Tensor]<br ALIGN="LEFT"/>svd : IncrementalPCA<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];
"peft.tuners.lora.torchao.TorchaoLoraLinear" [color="black", fontcolor="black", label=<{TorchaoLoraLinear|get_apply_tensor_subclass<br ALIGN="LEFT"/>weight<br ALIGN="LEFT"/>|merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.layer._ConvNd" [color="black", fontcolor="black", label=<{_ConvNd|adapter_layer_names<br ALIGN="LEFT"/>|dora_init(adapter_name: str): None<br ALIGN="LEFT"/>forward(x: torch.Tensor): torch.Tensor<br ALIGN="LEFT"/>get_delta_weight(adapter): torch.Tensor<br ALIGN="LEFT"/>merge(safe_merge: bool, adapter_names: Optional[list[str]]): None<br ALIGN="LEFT"/>unmerge(): None<br ALIGN="LEFT"/>update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, lora_bias)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.dora._DoraConvNdLayer" [color="black", fontcolor="black", label=<{_DoraConvNdLayer|<br ALIGN="LEFT"/>|forward(x)<br ALIGN="LEFT"/>get_weight_norm(weight, lora_weight, scaling): torch.Tensor<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.eva._Hook" [color="black", fontcolor="black", label=<{_Hook|gather_distributed_inputs : bool<br ALIGN="LEFT"/>model_input : NoneType<br ALIGN="LEFT"/>name : str<br ALIGN="LEFT"/>|gather_layer_inputs(layer_input)<br ALIGN="LEFT"/>prepare_layer_inputs(layer_input)<br ALIGN="LEFT"/>}>, shape="record", style="solid"];
"peft.tuners.lora.aqlm.AqlmLoraLinear" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.awq.AwqLoraLinear" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.bnb.Linear4bit" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.bnb.Linear8bitLt" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.dora.DoraConv2dLayer" -> "peft.tuners.lora.dora._DoraConvNdLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.dora.DoraConv3dLayer" -> "peft.tuners.lora.dora._DoraConvNdLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.dora.DoraEmbeddingLayer" -> "peft.tuners.lora.dora.DoraLinearLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.dora._DoraConvNdLayer" -> "peft.tuners.lora.dora.DoraLinearLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.eetq.EetqLoraLinear" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.eva.HashHook" -> "peft.tuners.lora.eva._Hook" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.eva.SVDHook" -> "peft.tuners.lora.eva._Hook" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.gptq.QuantLinear" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.hqq.HqqLoraLinear" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.layer.Conv2d" -> "peft.tuners.lora.layer._ConvNd" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.layer.Conv3d" -> "peft.tuners.lora.layer._ConvNd" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.layer.Embedding" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.layer.Linear" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.layer._ConvNd" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.torchao.TorchaoLoraLinear" -> "peft.tuners.lora.layer.Linear" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.tp_layer.LoraParallelLinear" -> "peft.tuners.lora.layer.LoraLayer" [arrowhead="empty", arrowtail="none"];
"peft.tuners.lora.config.LoraRuntimeConfig" -> "peft.tuners.lora.config.LoraConfig" [arrowhead="diamond", arrowtail="none", fontcolor="green", label="runtime_config", style="solid"];
}