MLOps Weights & Biases Integration
# Framework Integrations Guide
Complete guide to integrating W&B with popular ML frameworks.
## Table of Contents
- HuggingFace Transformers
- PyTorch Lightning
- Keras/TensorFlow
- Fast.ai
- XGBoost/LightGBM
- PyTorch Native
- Custom Integrations
## HuggingFace Transformers
### Automatic Integration
```python
from transformers import Trainer, TrainingArguments
import wandb

# Initialize W&B
wandb.init(project="hf-transformers", name="bert-finetuning")

# Training arguments with W&B
training_args = TrainingArguments(
    output_dir="./results",
    report_to="wandb",  # Enable W&B logging
    run_name="bert-base-finetuning",
    # Training params
    num_train_epochs=3,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    learning_rate=2e-5,
    # Logging
    logging_dir="./logs",
    logging_steps=100,
    logging_first_step=True,
    # Evaluation
    evaluation_strategy="steps",
    eval_steps=500,
    save_steps=500,
    # Other
    load_best_model_at_end=True,
    metric_for_best_model="eval_accuracy",
)

# Trainer automatically logs to W&B
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    compute_metrics=compute_metrics,
)

# Train (metrics logged automatically)
trainer.train()

# Finish W&B run
wandb.finish()
```
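The example above assumes a `compute_metrics` function is defined. Because `metric_for_best_model="eval_accuracy"` is set, it must return an `accuracy` key; the Trainer prefixes evaluation metrics with `eval_`. A minimal sketch for a classification head:

```python
import numpy as np

def compute_metrics(eval_pred):
    # The Trainer passes a (logits, labels) pair.
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    # Returned keys are prefixed with "eval_", so "accuracy"
    # becomes "eval_accuracy" in the logged metrics.
    return {"accuracy": (predictions == labels).mean()}
```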
### Custom Logging
```python
from transformers import Trainer, TrainingArguments
from transformers.integrations import WandbCallback
import wandb

class CustomWandbCallback(WandbCallback):
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        super().on_evaluate(args, state, control, metrics=metrics, **kwargs)
        # Log custom metrics alongside the defaults
        if metrics and "eval_accuracy" in metrics:
            wandb.log({
                "custom/eval_score": metrics["eval_accuracy"] * 100,
                "custom/epoch": state.epoch,
            })

# Use custom callback
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    callbacks=[CustomWandbCallback()],
)
```
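Note that `report_to="wandb"` in `TrainingArguments` makes the Trainer add the stock `WandbCallback` on its own, so passing a subclass through `callbacks=` can leave two W&B callbacks active and duplicate the default logging. One way to avoid this, sketched with the Trainer's callback API:

```python
# Swap the stock callback for the custom subclass instead of stacking both.
trainer.remove_callback(WandbCallback)
trainer.add_callback(CustomWandbCallback())
```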
### Log Model to Registry
```python
from transformers import Trainer, TrainingArguments
import wandb

training_args = TrainingArguments(
    output_dir="./results",
    report_to="wandb",
    # Best-model tracking requires matching eval/save strategies
    evaluation_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)

trainer.train()

# Save final model as artifact
trainer.save_model("./results/final")
model_artifact = wandb.Artifact(
    'hf-bert-model',
    type='model',
    description='BERT fine-tuned with the HF Trainer',
)
model_artifact.add_dir("./results/final")
wandb.log_artifact(model_artifact)
wandb.finish()
```
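To actually place the model in the W&B Model Registry, link the logged artifact to a registered model. A minimal sketch, assuming a registered model at the placeholder path `model-registry/bert-classifier`:

```python
import wandb

run = wandb.init(project="hf-transformers", job_type="register-model")
artifact = run.use_artifact('hf-bert-model:latest')
# Link this artifact version to the registry entry
# ("model-registry/bert-classifier" is a placeholder path).
run.link_artifact(artifact, 'model-registry/bert-classifier', aliases=['staging'])
run.finish()
```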