Doc-to-LoRA release

Committed by 51616 on 2026-02-27 03:47:04 +00:00
commit 1abe8ae16d
92 changed files with 22131 additions and 0 deletions

scripts/niah/0-gen_data.sh Executable file

@ -0,0 +1 @@
uv run data/generate_ctx_magic_number.py

scripts/niah/1-train.sh Executable file

@ -0,0 +1,42 @@
#!/bin/bash
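# NIAH hyper-LoRA training with Gemma-2-2B-IT on the ctx_magic_number_32_256 config.
# Any extra flags passed to this script are forwarded to train.py via "$@".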
WANDB_MODE=disabled uv run train.py \
configs/niah_exp/ctx_magic_number_32_256.yaml \
--model_name_or_path=google/gemma-2-2b-it \
--num_train_epochs=1 \
--per_device_train_batch_size=-1 \
--gradient_accumulation_steps=16 \
--per_device_eval_batch_size=16 \
--exp_setup=hyper_lora \
--aggregator_type=perceiver \
--target_modules=down_proj \
--num_blocks=8 \
--num_self_attn_per_block=0 \
--num_pre_head_layers=1 \
--lora_r=8 \
--eval_steps=100 \
--logging_steps=10 \
--save_steps=1000 \
--learning_rate=4e-5 \
--lora_dropout=0.0 \
--neftune_noise_alpha=0 \
--per_rank_gen=True \
--per_layer_processing=True \
--gen_lora_l1_reg_coef=1.5 \
--use_sequence_packing=True \
--max_packed_inp_len=4096 \
--max_packed_ctx_len=4096 \
--dataloader_num_workers=0 \
--dataloader_prefetch_factor=None \
--ctx_encoder_type=early_exit \
--n_latent_queries=208 \
--use_kl_loss=False \
--eval_on_start=True \
--max_ctx_chunk_len=512 \
--min_ctx_chunk_len=25 \
--num_chunk_probs='{"1":"0.5", "2":"0.125", "3":"0.0625", "4":"0.0625", "5":"0.0625", "6":"0.0625", "7":"0.0625", "8":"0.0625"}' \
--max_val_samples_per_ds=100 \
--seed=1 \
--use_per_ctx_average_loss=True \
--torch_empty_cache_steps=10 \
"$@"

scripts/niah/2-eval-test.sh Executable file

@ -0,0 +1 @@
WANDB_MODE=disabled uv run run_eval.py --checkpoint_path $CHECKPOINT_PATH --datasets ctx_magic_number_32_1024 ctx_magic_number_1024_2048 ctx_magic_number_3072_4096 ctx_magic_number_7168_8192 ctx_magic_number_15360_16384 ctx_magic_number_28672_32768 ctx_magic_number_57344_65536 ctx_magic_number_122880_131072 --max_ctx_chunk_len=1024 --split test --eval_batch_size_gen=4

scripts/niah/2-eval.sh Executable file

@ -0,0 +1 @@
WANDB_MODE=disabled uv run run_eval.py --checkpoint_path $CHECKPOINT_PATH --datasets ctx_magic_number_32_1024 ctx_magic_number_1024_2048 ctx_magic_number_2048_3072 ctx_magic_number_3072_4096 ctx_magic_number_4096_5120 ctx_magic_number_5120_6144 ctx_magic_number_6144_7168 ctx_magic_number_7168_8192 ctx_magic_number_8192_9216 ctx_magic_number_9216_10240 ctx_magic_number_10240_11264 ctx_magic_number_11264_12288 ctx_magic_number_12288_13312 ctx_magic_number_13312_14336 ctx_magic_number_14336_15360 ctx_magic_number_15360_16384 ctx_magic_number_16384_20480 ctx_magic_number_20480_24576 ctx_magic_number_24576_28672 ctx_magic_number_28672_32768 ctx_magic_number_32768_40960 ctx_magic_number_40960_49152 ctx_magic_number_49152_57344 ctx_magic_number_57344_65536 ctx_magic_number_65536_73728 ctx_magic_number_73728_81920 ctx_magic_number_81920_90112 ctx_magic_number_90112_98304 ctx_magic_number_98304_106496 ctx_magic_number_106496_114688 ctx_magic_number_114688_122880 ctx_magic_number_122880_131072 --max_ctx_chunk_len=1024 --split test --eval_batch_size_gen=4

scripts/niah/README.md Normal file

@ -0,0 +1,8 @@
# NIAH experiment
```bash
# Run the scripts in this order.
# Data generation (step 0) only needs to be run once.
uv run bash scripts/niah/0-gen_data.sh
uv run bash scripts/niah/1-train.sh
uv run bash scripts/niah/2-eval.sh
```
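
Step 2 reads the checkpoint location from the `CHECKPOINT_PATH` environment variable (see `scripts/niah/2-eval.sh`). A minimal end-to-end sketch, where the `outputs/.../checkpoint-1000` path is only an illustrative placeholder for wherever your training run saves checkpoints:

```bash
# generate the NIAH data (one-time)
uv run bash scripts/niah/0-gen_data.sh

# train; extra flags after the script name are forwarded to train.py via "$@"
uv run bash scripts/niah/1-train.sh --seed=2

# evaluate a saved checkpoint (path is an example; point it at your actual checkpoint dir)
export CHECKPOINT_PATH=outputs/ctx_magic_number_32_256/checkpoint-1000
uv run bash scripts/niah/2-eval.sh
```

`scripts/niah/2-eval-test.sh` runs the same evaluation over a smaller set of context-length buckets.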


@ -0,0 +1,39 @@
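# NIAH hyper-LoRA training with Mistral-7B-Instruct-v0.2; mirrors scripts/niah/1-train.sh
# with model-specific gradient accumulation, packing lengths, and L1 regularization.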
WANDB_MODE=disabled uv run train.py configs/niah_exp/ctx_magic_number_32_256.yaml \
--model_name_or_path=mistralai/Mistral-7B-Instruct-v0.2 \
--num_train_epochs=1 \
--per_device_train_batch_size=-1 \
--gradient_accumulation_steps=64 \
--per_device_eval_batch_size=16 \
--exp_setup=hyper_lora \
--aggregator_type=perceiver \
--target_modules=down_proj \
--num_blocks=8 \
--num_self_attn_per_block=0 \
--num_pre_head_layers=1 \
--lora_r=8 \
--eval_steps=100 \
--logging_steps=10 \
--save_steps=1000 \
--learning_rate=4e-5 \
--lora_dropout=0.0 \
--neftune_noise_alpha=0 \
--per_rank_gen=True \
--per_layer_processing=True \
--gen_lora_l1_reg_coef=2.0 \
--use_sequence_packing=True \
--max_packed_inp_len=1024 \
--max_packed_ctx_len=1024 \
--dataloader_num_workers=0 \
--dataloader_prefetch_factor=None \
--ctx_encoder_type=early_exit \
--n_latent_queries=208 \
--use_kl_loss=False \
--eval_on_start=True \
--max_ctx_chunk_len=512 \
--min_ctx_chunk_len=25 \
--num_chunk_probs='{"1":"0.5", "2":"0.125", "3":"0.0625", "4":"0.0625", "5":"0.0625", "6":"0.0625", "7":"0.0625", "8":"0.0625"}' \
--max_val_samples_per_ds=100 \
--seed=1 \
--use_per_ctx_average_loss=True \
--torch_empty_cache_steps=10


@ -0,0 +1,39 @@
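# NIAH hyper-LoRA training with Qwen3-4B-Instruct-2507; mirrors scripts/niah/1-train.sh
# with model-specific gradient accumulation, packing lengths, and L1 regularization.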
WANDB_MODE=disabled uv run train.py configs/niah_exp/ctx_magic_number_32_256.yaml \
--model_name_or_path=Qwen/Qwen3-4B-Instruct-2507 \
--num_train_epochs=1 \
--per_device_train_batch_size=-1 \
--gradient_accumulation_steps=32 \
--per_device_eval_batch_size=16 \
--exp_setup=hyper_lora \
--aggregator_type=perceiver \
--target_modules=down_proj \
--num_blocks=8 \
--num_self_attn_per_block=0 \
--num_pre_head_layers=1 \
--lora_r=8 \
--eval_steps=100 \
--logging_steps=10 \
--save_steps=1000 \
--learning_rate=4e-5 \
--lora_dropout=0.0 \
--neftune_noise_alpha=0 \
--per_rank_gen=True \
--per_layer_processing=True \
--gen_lora_l1_reg_coef=0.5 \
--use_sequence_packing=True \
--max_packed_inp_len=2048 \
--max_packed_ctx_len=2048 \
--dataloader_num_workers=0 \
--dataloader_prefetch_factor=None \
--ctx_encoder_type=early_exit \
--n_latent_queries=208 \
--use_kl_loss=False \
--eval_on_start=True \
--max_ctx_chunk_len=512 \
--min_ctx_chunk_len=25 \
--num_chunk_probs='{"1":"0.5", "2":"0.125", "3":"0.0625", "4":"0.0625", "5":"0.0625", "6":"0.0625", "7":"0.0625", "8":"0.0625"}' \
--max_val_samples_per_ds=100 \
--seed=1 \
--use_per_ctx_average_loss=True \
--torch_empty_cache_steps=10