Reputation: 1
I got a traceback call: "TypeError: init() got an unexpected keyword argument 'period'" , and I'm not sure where the keyword argument 'period' is coming from .(This is my first time using stackoverflow , so if more detail is needed I'll try my best to explain more.
#@markdown If CUDA runs out of memory, try the following:
#@markdown * Click on Runtime -> Restart runtime, re-run step 3, and try again.
#@markdown * If that doesn't help, reduce the batch size (default 64).
batch_size = 40 #@param {type:"integer"}
epochs = 20
learning_rate = 1e-3
min_learning_rate = 3e-6
load_checkpoints = True
import os
from hydra.experimental import compose, initialize
from hydra.core.global_hydra import GlobalHydra
from omegaconf import OmegaConf
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import TalkNetDursModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
def train(cfg):
cfg.sample_rate = 22050
cfg.train_dataset = "trainfiles.json"
cfg.validation_datasets = "valfiles.json"
cfg.durs_file = os.path.join(output_dir, "durations.pt")
cfg.f0_file = os.path.join(output_dir, "f0s.pt")
cfg.trainer.accelerator = "dp"
cfg.trainer.max_epochs = epochs
cfg.trainer.check_val_every_n_epoch = 5
cfg.model.train_ds.dataloader_params.batch_size = batch_size
cfg.model.validation_ds.dataloader_params.batch_size = batch_size
cfg.model.optim.lr = learning_rate
cfg.model.optim.sched.min_lr = min_learning_rate
cfg.exp_manager.exp_dir = output_dir
# Find checkpoints
ckpt_path = ""
if load_checkpoints:
path0 = os.path.join(output_dir, "TalkNetDurs")
if os.path.exists(path0):
path1 = sorted(os.listdir(path0))
for i in range(len(path1)):
path2 = os.path.join(path0, path1[-(1+i)], "checkpoints")
if os.path.exists(path2):
match = [x for x in os.listdir(path2) if "last.ckpt" in x]
if len(match) > 0:
ckpt_path = os.path.join(path2, match[0])
print("Resuming training from " + match[0])
break
if ckpt_path != "":
trainer = pl.Trainer(**cfg.trainer, resume_from_checkpoint = ckpt_path)
model = TalkNetDursModel(cfg=cfg.model, trainer=trainer)
else:
warmstart_path = "/content/talknet_durs.nemo"
trainer = pl.Trainer(**cfg.trainer)
model = TalkNetDursModel.restore_from(warmstart_path, override_config_path=cfg)
model.set_trainer(trainer)
model.setup_training_data(cfg.model.train_ds)
model.setup_validation_data(cfg.model.validation_ds)
model.setup_optimization(cfg.model.optim)
print("Warm-starting from " + warmstart_path)
exp_manager(trainer, cfg.get('exp_manager', None))
trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()]) # noqa
trainer.fit(model)
GlobalHydra().clear()
initialize(config_path="conf")
cfg = compose(config_name="talknet-durs")
train(cfg)
Not sure what this error means ,I'm not sure how to go about debugging this error. Could this possibly be an import error?
<ipython-input-11-9d8efe1a775b> in <module>
70 initialize(config_path="conf")
71 cfg = compose(config_name="talknet-durs")
---> 72 train(cfg)
3 frames
/usr/local/lib/python3.7/dist-packages/nemo/utils/exp_manager.py in __init__(self, always_save_nemo, save_best_model, postfix, **kwargs)
597
598 # Call the parent class constructor with the remaining kwargs.
--> 599 super().__init__(**kwargs)
600
601 @rank_zero_only
TypeError: __init__() got an unexpected keyword argument 'period'```
Upvotes: 0
Views: 1723