{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen3-Next-80B-A3B-Thinking",
  "bias": "none",
  "corda_config": null,
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": null,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "qalora_group_size": 16,
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "shared_expert.gate_proj",
    "mlp.experts.*.up_proj",
    "shared_expert.up_proj",
    "mlp.gate",
    "linear_attn.in_proj_ba",
    "linear_attn.out_proj",
    "v_proj",
    "k_proj",
    "shared_expert.down_proj",
    "q_proj",
    "o_proj",
    "mlp.experts.*.gate_proj",
    "linear_attn.in_proj_qkvz",
    "mlp.experts.*.down_proj"
  ],
  "target_parameters": [],
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
  "use_dora": false,
  "use_qalora": false,
  "use_rslora": false
}
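
A minimal sketch of how an adapter with this config would typically be loaded for inference with the PEFT library, assuming the adapter weights live alongside this `adapter_config.json`. The adapter path `"your-org/qwen3-next-lora"` is a placeholder, not a real checkpoint.

```python
# Sketch: load the base model, then attach the LoRA adapter.
# PEFT reads adapter_config.json from the adapter path and injects
# rank-32 LoRA layers (alpha=64) into the attention, shared-expert,
# and per-expert MLP projections listed in target_modules.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen3-Next-80B-A3B-Thinking"  # base_model_name_or_path

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id,
    torch_dtype="auto",  # keep the checkpoint's native precision
    device_map="auto",   # shard across available GPUs
)

# Placeholder adapter id; point this at the directory or repo
# containing this adapter_config.json and its adapter weights.
model = PeftModel.from_pretrained(base_model, "your-org/qwen3-next-lora")
model.eval()  # matches "inference_mode": true; lora_dropout only applies in training
```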