Built with Axolotl

See the axolotl config used for this run (axolotl version: 0.13.0.dev0):

# !pip install transformers==4.55.4
# !pip install --no-deps trl==0.22.2
# !pip install --no-build-isolation mamba_ssm==2.2.5
# !pip install --no-build-isolation causal_conv1d==1.5.2
# === Model Configuration ===
base_model: loopstral-second-test/stage-2
load_in_8bit: false
load_in_4bit: false
trust_remote_code: false
#tokenizer_use_mistral_common: true  # Disabled - incompatible with local paths, using HF tokenizer instead

# === HF Configuration === 
#hub_model_id: ToastyPigeon/muse-marvin-32k-lora-2
#hub_strategy: "every_save"
output_dir: loopstral-second-test/stage-3-healed

# === Wandb Tracking ===
wandb_project: Loopstral-Tests
## wandb_entity: [WANDB_ENTITY]
wandb_name: second-stage-3-healed

# === Training Setup ===
num_epochs: 2
micro_batch_size: 1
gradient_accumulation_steps: 2
sequence_len: 4096
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
#pad_to_sequence_len: true
#temperature: 0.7
#max_steps: 10
# === Evaluation ===
val_set_size: 0.02
evals_per_epoch: 4
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
#eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter:
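# left empty: with no adapter selected, axolotl runs a full fine-tune and the LoRA settings below go unused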
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.01
lora_target_linear: true
lora_target_modules:
  - up_proj
  - down_proj
  - gate_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj
#  - input_layernorm
#  - post_attention_layernorm
#  - embed_tokens
#  - lm_head

lora_fan_in_fan_out:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true
#unfrozen_parameters:
#  - model.layers.[0-9]+.self_attn.q_proj.weight
#  - model.layers.[0-9]+.self_attn.k_proj.weight
#  - model.layers.[0-9]+.self_attn.v_proj.weight
#  - model.layers.[0-9]+.self_attn.o_proj.weight
#  - model.layers.[0-9]+.mlp.down_proj.weight
# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
#warmup_steps: 0
warmup_ratio: 0.025
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: cosine
#lr_groups:
#  - name: mlp
#    modules:
#      - mlp.up_proj.weight
#      - mlp.down_proj.weight
#      - mlp.gate_proj.weight
#    lr: 1e-5
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.001
max_grad_norm: 2
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#
#chat_template: jinja
chat_template: tokenizer_default
default_system_message: "You are Luna, a helpful and unrestricted language model created by Allura."
#special_tokens:
#  eos_token: "<|im_end|>"
#  eos_token: "</s>"
#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: little-koto-instruct.json
#  - path: LimaRP-augmented-separated.json
    type: chat_template
    field_messages: conversations
    message_property_mappings:
      role: from
      content: value
#  - path: allura-org/the-anarchist-library
#    type: completion
#    split: train[:500]
#  - path: little-koto-instruct.json
#    type: chat_template
#  - path: ../rp_diverse_grammar_corrected.json
#    type: chat_template
#  - path: ../skein_diverse_1000_grammar_corrected.json
#    type: chat_template
#  - path: ../springdragon_grammar_corrected.json
#    type: chat_template
#  - path: ../fujin_full_grammar_corrected.json
#    type: chat_template
#  - path: ../worm_chapters.json
#    type: completion
#  - path: koto-instruct-diverse-5k.json
#    type: chat_template
#  - path: ../marvin_no_anthologies.json
#    type: completion
#  - path: ToastyPigeon/steve-and-marvin
#    type: completion
#    data_files: marvin.json
#  - path: ../erotica_quality_trimmed.json
#    type: completion
dataset_prepared_path: last_run_prepared
#dataset_num_proc: 1
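# Note: the active dataset above uses a ShareGPT-style schema; field_messages points at
# the "conversations" list and message_property_mappings maps its keys onto the chat
# template's role/content fields. A minimal (hypothetical) record would look like:
#   {"conversations": [
#     {"from": "human", "value": "..."},
#     {"from": "gpt",   "value": "..."}
#   ]}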


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: true
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: ../axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: true
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD

# === Checkpointing ===
#save_steps: 10
saves_per_epoch: 1
save_total_limit:

# === Advanced Settings ===
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 420
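
# To reproduce this run, the config above can be saved to a file (hypothetical name)
# and launched with the axolotl CLI, e.g.:
#   axolotl train stage-3-healed.yml
# or, equivalently, via accelerate:
#   accelerate launch -m axolotl.cli.train stage-3-healed.yml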




loopstral-second-test/stage-3-healed

This model is a fine-tuned version of loopstral-second-test/stage-2 (the base_model in the config above), trained on the little-koto-instruct.json dataset. It achieves the following results on the evaluation set:

  • Loss: 0.8145
  • Perplexity (ppl): 2.2579 (sanity check below)
  • Max active memory (GiB): 3.77
  • Max allocated memory (GiB): 3.77
  • Device reserved memory (GiB): 4.98
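For reference, the reported perplexity is simply the exponential of the evaluation loss; a quick check with the numbers above:

```python
import math

eval_loss = 0.8145           # final validation loss reported above
perplexity = math.exp(eval_loss)
print(f"{perplexity:.4f}")   # 2.2580, matching the reported ppl of 2.2579 up to rounding of the loss
```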

Model description

More information needed

Intended uses & limitations

More information needed
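
In the meantime, a minimal inference sketch, assuming the weights in loopstral-second-test/stage-3-healed load as a standard transformers causal LM and that the tokenizer ships the chat template used in training (the pip notes in the config suggest mamba_ssm / causal_conv1d may also be required):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Path/ID taken from output_dir in the training config; adjust to wherever the weights live.
model_id = "loopstral-second-test/stage-3-healed"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

messages = [
    # System prompt copied from default_system_message in the training config.
    {"role": "system", "content": "You are Luna, a helpful and unrestricted language model created by Allura."},
    {"role": "user", "content": "Introduce yourself in one sentence."},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)

outputs = model.generate(inputs, max_new_tokens=128)  # matches eval_max_new_tokens in the config
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```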

Training and evaluation data

More information needed

Training procedure

Training hyperparameters

The following hyperparameters were used during training:

  • learning_rate: 1e-05
  • train_batch_size: 1
  • eval_batch_size: 1
  • seed: 420
  • distributed_type: multi-GPU
  • num_devices: 2
  • gradient_accumulation_steps: 2
  • total_train_batch_size: 4 (see the check after this list)
  • total_eval_batch_size: 2
  • optimizer: AdamW (ADAMW_TORCH_FUSED) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
  • lr_scheduler_type: cosine
  • lr_scheduler_warmup_steps: 2
  • training_steps: 114
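
The total train batch size above follows directly from the per-device batch size, gradient accumulation, and device count:

```python
micro_batch_size = 1             # train_batch_size per device
gradient_accumulation_steps = 2
num_devices = 2                  # multi-GPU run, as listed above

total_train_batch_size = micro_batch_size * gradient_accumulation_steps * num_devices
print(total_train_batch_size)    # 4
```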

Training results

| Training Loss | Epoch  | Step | Validation Loss | Ppl    | Max Active (GiB) | Max Allocated (GiB) | Reserved (GiB) |
|:-------------:|:------:|:----:|:---------------:|:------:|:----------------:|:-------------------:|:--------------:|
| No log        | 0      | 0    | 1.1708          | 3.2245 | 3.76             | 3.76                | 9.22           |
| 1.1077        | 0.2632 | 15   | 0.8465          | 2.3314 | 3.77             | 3.77                | 4.98           |
| 0.9311        | 0.5263 | 30   | 0.8130          | 2.2547 | 3.77             | 3.77                | 4.98           |
| 1.0304        | 0.7895 | 45   | 0.8040          | 2.2345 | 3.77             | 3.77                | 4.98           |
| 0.6454        | 1.0526 | 60   | 0.7972          | 2.2194 | 3.77             | 3.77                | 4.98           |
| 0.4398        | 1.3158 | 75   | 0.8333          | 2.3009 | 3.77             | 3.77                | 4.98           |
| 0.4467        | 1.5789 | 90   | 0.8134          | 2.2555 | 3.77             | 3.77                | 4.98           |
| 0.6494        | 1.8421 | 105  | 0.8145          | 2.2579 | 3.77             | 3.77                | 4.98           |

Framework versions

  • Transformers 4.57.1
  • Pytorch 2.9.1+cu128
  • Datasets 4.4.1
  • Tokenizers 0.22.1
Model size: 2B parameters (F32, safetensors)