
BaseTrainingArguments


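Below is the full field listing you get by printing a Hugging Face transformers.TrainingArguments object, here from a full-BERT run on MNLI (3 epochs at lr 5e-5, a v4.x transformers from late 2022). The trailing #### markers flag the fields most commonly worth tuning.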
TrainingArguments(
_n_gpu=1,########################################GPU
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
auto_find_batch_size=False,
bf16=False,
bf16_full_eval=False,
data_seed=None,
dataloader_drop_last=False,########################dataloader
dataloader_num_workers=0,
dataloader_pin_memory=True,######################dataloader
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800,
debug=[],
deepspeed=None,###############################deepspeed
disable_tqdm=False,
do_eval=True,##################################eval
do_predict=False,###############################predict
do_train=True,##################################train
eval_accumulation_steps=None,
eval_delay=0,
eval_steps=None,#############################eval steps
evaluation_strategy=no,
fp16=False,######################################fp16
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=False,
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_model_id=None,
hub_private_repo=False,
hub_strategy=every_save,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_inputs_for_metrics=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=5e-05,##############################lr
length_column_name=length,
load_best_model_at_end=False,
local_rank=-1,####################################local rank
log_level=passive,
log_level_replica=passive,
log_on_each_node=True,
logging_dir=/home/hejiabang/sparse-and-robust-PLM/log/full_bert/MNLI/epoch3_lr5e-5/1/logging,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1000,######################################logging_steps
logging_strategy=steps,
lr_scheduler_type=linear,
max_grad_norm=1.0,#####################################max_grad_norm
max_steps=-1,
metric_for_best_model=None,
mp_parameters=,
no_cuda=False,
num_train_epochs=3.0,######################################epochs
optim=adamw_hf,##########################################optim
output_dir=/home/hejiabang/sparse-and-robust-PLM/log/full_bert/MNLI/epoch3_lr5e-5/1,######output_dir
overwrite_output_dir=False,##################overwrite output
past_index=-1,
per_device_eval_batch_size=8,##################device batch eval
per_device_train_batch_size=32,#################device batch train
prediction_loss_only=False,
push_to_hub=False,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=['tensorboard', 'wandb'],
resume_from_checkpoint=None,
run_name=/home/hejiabang/sparse-and-robust-PLM/log/full_bert/MNLI/epoch3_lr5e-5/1,
save_on_each_node=False,
save_steps=0,################################model-save steps
save_strategy=steps,
save_total_limit=None,
seed=1,#####################################seed
sharded_ddp=[],
skip_memory_metrics=True,
tf32=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_ipex=False,
use_legacy_prediction_loop=False,
use_mps_device=False,
warmup_ratio=0.0,###############################warmup_ratio
warmup_steps=3600,#############################warmup_steps
weight_decay=0.0,###############################weight_decay
xpu_backend=None,
)
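For reference, a dump like this is just the repr of the object, so it can be reproduced by building the arguments yourself and printing them. A minimal sketch, assuming transformers v4.x: it passes only the fields that deviate from the defaults visible above (paths copied verbatim from the dump), and leaves everything else to the library defaults.

from transformers import TrainingArguments

output_dir = "/home/hejiabang/sparse-and-robust-PLM/log/full_bert/MNLI/epoch3_lr5e-5/1"

args = TrainingArguments(
    output_dir=output_dir,            # run_name defaults to output_dir, as seen above
    do_train=True,
    do_eval=True,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    learning_rate=5e-5,
    num_train_epochs=3.0,
    warmup_steps=3600,
    logging_dir=output_dir + "/logging",
    logging_steps=1000,
    seed=1,
    report_to=["tensorboard", "wandb"],
)

print(args)  # prints the complete field dump, like the listing above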

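To actually launch the run these arguments describe, the object is handed to a Trainer together with a model and tokenized datasets. Another hedged sketch: the choice of bert-base-uncased and GLUE/MNLI via the datasets library is an assumption inferred from the output path above, not something the original post shows.

from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer)

# Assumed model/data for illustration: the dump's path suggests a full BERT
# on MNLI, so we use bert-base-uncased and GLUE/MNLI (3-way classification).
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=3
)

raw = load_dataset("glue", "mnli")

def tokenize(batch):
    return tokenizer(batch["premise"], batch["hypothesis"], truncation=True)

encoded = raw.map(tokenize, batched=True)

trainer = Trainer(
    model=model,
    args=args,                                  # the TrainingArguments built above
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation_matched"],
    tokenizer=tokenizer,                        # enables dynamic padding via the default collator
)
trainer.train()

trainer.train() then writes checkpoints under output_dir and logs to TensorBoard and Weights & Biases, per report_to above.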
From: https://www.cnblogs.com/Tsukinousag1/p/16968525.html
