55 | 55 | from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
56 | 56 | from trl.trainer import ConstantLengthDataset
57 | 57 | from peft import LoraConfig, get_peft_model
58 |      | - from pypgrx import print_info, insert_logs
59 | 58 | from abc import abstractmethod
60 | 59 |
61 | 60 | transformers.logging.set_verbosity_info()
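Note: the import hunk drops pypgrx in favor of r_print_info, which this file appears to receive from its host environment rather than through an import. A minimal, hypothetical shim for exercising the module outside that environment (the fallback below is an assumption for local testing, not part of this change):

try:
    r_print_info  # noqa: F821 -- assumed to be injected by the host at runtime
except NameError:
    def r_print_info(message: str) -> None:
        # Stand-in for local testing only: plain stdout instead of host logging
        print(message)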
@@ -1017,8 +1016,7 @@ def on_log(self, args, state, control, logs=None, **kwargs):
1017 | 1016 | logs["step"] = state.global_step
1018 | 1017 | logs["max_steps"] = state.max_steps
1019 | 1018 | logs["timestamp"] = str(datetime.now())
1020 |      | - print_info(json.dumps(logs, indent=4))
1021 |      | - insert_logs(self.project_id, self.model_id, json.dumps(logs))
     | 1019 | + r_print_info(json.dumps(logs, indent=4))
1022 | 1020 |
1023 | 1021 |
1024 | 1022 | class FineTuningBase:
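Note: for context, a minimal sketch of a Hugging Face TrainerCallback whose on_log hook follows the same pattern as the hunk above; r_print_info is assumed to be available (see the shim sketch earlier), and the class name here is illustrative, not the PGMLCallback from this file:

import json
from datetime import datetime
from transformers import TrainerCallback

class LoggingCallback(TrainerCallback):
    """Illustrative callback: forward trainer log events as JSON."""

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs is None:
            return
        # Enrich the metrics dict with step, max_steps and a timestamp,
        # mirroring the lines in the hunk above
        logs["step"] = state.global_step
        logs["max_steps"] = state.max_steps
        logs["timestamp"] = str(datetime.now())
        r_print_info(json.dumps(logs, indent=4))  # assumed host-provided logger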
@@ -1100,9 +1098,9 @@ def print_number_of_trainable_model_parameters(self, model):
1100 | 1098 | trainable_model_params += param.numel()
1101 | 1099 |
1102 | 1100 | # Calculate and print the number and percentage of trainable parameters
1103 |      | - print_info(f"Trainable model parameters: {trainable_model_params}")
1104 |      | - print_info(f"All model parameters: {all_model_params}")
1105 |      | - print_info(
     | 1101 | + r_print_info(f"Trainable model parameters: {trainable_model_params}")
     | 1102 | + r_print_info(f"All model parameters: {all_model_params}")
     | 1103 | + r_print_info(
1106 | 1104 | f"Percentage of trainable model parameters: {100 * trainable_model_params / all_model_params:.2f}%"
1107 | 1105 | )
1108 | 1106 |
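Note: the hunk above only shows the tail of print_number_of_trainable_model_parameters. A standalone sketch of the same idea; the loop over named_parameters is reconstructed as an assumption, since those lines are not part of the diff:

def count_trainable_parameters(model):
    trainable_model_params = 0
    all_model_params = 0
    for _, param in model.named_parameters():
        all_model_params += param.numel()
        if param.requires_grad:
            # Only parameters that receive gradient updates count as trainable
            trainable_model_params += param.numel()
    r_print_info(f"Trainable model parameters: {trainable_model_params}")
    r_print_info(f"All model parameters: {all_model_params}")
    r_print_info(
        f"Percentage of trainable model parameters: "
        f"{100 * trainable_model_params / all_model_params:.2f}%"
    )
    return trainable_model_params, all_model_params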
@@ -1398,7 +1396,7 @@ def __init__(
1398 | 1396 | "bias": "none",
1399 | 1397 | "task_type": "CAUSAL_LM",
1400 | 1398 | }
1401 |      | - print_info(
     | 1399 | + r_print_info(
1402 | 1400 | "LoRA configuration are not set. Using default parameters"
1403 | 1401 | + json.dumps(self.lora_config_params)
1404 | 1402 | )
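Note: the default dict shown above is later unpacked into LoraConfig(**self.lora_config_params). A sketch of what that looks like in isolation; only "bias" and "task_type" are visible in this hunk, so the r, lora_alpha and lora_dropout values are illustrative assumptions:

from peft import LoraConfig

lora_config_params = {
    "r": 8,                    # illustrative rank
    "lora_alpha": 16,          # illustrative scaling factor
    "lora_dropout": 0.05,      # illustrative dropout
    "bias": "none",            # visible in the hunk above
    "task_type": "CAUSAL_LM",  # visible in the hunk above
}
lora_config = LoraConfig(**lora_config_params)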
@@ -1465,7 +1463,7 @@ def formatting_prompts_func(example):
1465 | 1463 | peft_config=LoraConfig(**self.lora_config_params),
1466 | 1464 | callbacks=[PGMLCallback(self.project_id, self.model_id)],
1467 | 1465 | )
1468 |      | - print_info("Creating Supervised Fine Tuning trainer done. Training ... ")
     | 1466 | + r_print_info("Creating Supervised Fine Tuning trainer done. Training ... ")
1469 | 1467 |
1470 | 1468 | # Train
1471 | 1469 | self.trainer.train()
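Note: a minimal sketch of how the pieces in this hunk fit together: an SFTTrainer built with a PEFT LoRA config and a custom callback, then trained. model and train_dataset are assumed to be defined elsewhere; LoggingCallback and lora_config_params refer to the illustrative sketches above, not to this file's PGMLCallback:

from trl import SFTTrainer
from peft import LoraConfig

trainer = SFTTrainer(
    model=model,                  # assumed: a causal LM loaded elsewhere
    train_dataset=train_dataset,  # assumed: a prepared training dataset
    peft_config=LoraConfig(**lora_config_params),
    callbacks=[LoggingCallback()],
)
trainer.train()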